Merge pull request #7 from cgaonker/master

Adding and modifying MD5, TLS, IGMP, DHCP test cases.
diff --git a/.gitignore b/.gitignore
index ba74660..cf55afb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -55,3 +55,9 @@
 
 # PyBuilder
 target/
+sub*.db
+*~
+*.swp
+*.jpeg
+*.gv
+*ascpc*@*#
diff --git a/Vagrantfile b/Vagrantfile
new file mode 100644
index 0000000..1abd52c
--- /dev/null
+++ b/Vagrantfile
@@ -0,0 +1,35 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+
+Vagrant.configure(2) do |config|
+
+  if (/cygwin|mswin|mingw|bccwin|wince|emx/ =~ RUBY_PLATFORM) != nil
+    config.vm.synced_folder ".", "/cord-tester", mount_options: ["dmode=700,fmode=600"]
+  else
+    config.vm.synced_folder ".", "/cord-tester"
+  end
+
+  config.vm.define "cordtest" do |d|
+    d.vm.box = "ubuntu/trusty64"
+    d.vm.hostname = "cordtest"
+    d.vm.network "private_network", ip: "10.100.198.202"
+    d.vm.provision :shell, path: "src/test/setup/prerequisites.sh"
+    d.vm.provider "virtualbox" do |v|
+      v.memory = 3000
+    end
+  end
+
+  config.vm.define "prod" do |d|
+    d.vm.box = "ubuntu/trusty64"
+    d.vm.hostname = "prod"
+    d.vm.network "private_network", ip: "10.100.198.203"
+    d.vm.provider "virtualbox" do |v|
+      v.memory = 2048
+    end
+  end
+
+  if Vagrant.has_plugin?("vagrant-cachier")
+    config.cache.scope = :box
+  end
+
+end
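
The Vagrantfile above defines two VirtualBox VMs, cordtest and prod, and provisions the test VM with src/test/setup/prerequisites.sh. A minimal workflow sketch, assuming Vagrant and VirtualBox are already installed (the VM name and the /cord-tester sync path come from the file above):

```
# Bring up the test VM; prerequisites.sh runs as the provisioner on first boot
vagrant up cordtest

# Log into the VM; this repository is synced at /cord-tester
vagrant ssh cordtest
```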
diff --git a/docs/running.md b/docs/running.md
index e69de29..7ff3dd5 100644
--- a/docs/running.md
+++ b/docs/running.md
@@ -0,0 +1,47 @@
+# Documentation
+
+Use the cord-setup.sh bash script to run the tests for your test environment.
+
+* To build a fresh test container before running the test, use the -b option like below:
+
+```
+sudo ./cord-setup.sh -o onos:latest -a freeradius:latest -b onos:runtest -t dhcp-igmp
+```
+
+* The above would build a docker container called onos:runtest before running the test.
+Otherwise, it tries to spawn an existing test container called onos:nosetest to run the tests.
+
+* To start the cord-tester, make sure the onos and radius containers are up and running.
+* Then you can start it with the container id or tag like below:
+
+```
+sudo ./cord-setup.sh -o onos:latest -a freeradius:latest -t dhcp
+```
+
+* The above would spawn a test container and run the dhcp test.
+
+* If you want to run a list of tests, just separate them with hyphens.
+
+```
+sudo ./cord-setup.sh -o onos:latest -a freeradius:latest -t dhcp-igmp-tls
+```
+
+* If you want to run a specific test, you can give the classname.testname like below
+
+```
+sudo ./cord-setup.sh -o onos:latest -a freeradius:latest -t dhcp:dhcp_exchange.test_dhcp_1request-igmp:test_igmp_1group_join_latency
+```
+
+* If you want to spawn a test and kill the test container after the tests are done, specify the -k option like below.
+
+```
+sudo ./cord-setup.sh -o onos:latest -a freeradius:latest -t dhcp -k
+```
+
+* If you want to clean up all the test containers tagged onos:nosetest, use the -C cleanup option like below.
+
+```
+sudo ./cord-setup.sh -o onos:latest -C onos:nosetest
+```
+
+* For other options, run with the -h option.
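
The last bullet only points at the built-in help; the corresponding invocation, using the same script as the examples above, is simply:

```
sudo ./cord-setup.sh -h
```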
diff --git a/src/test/apps/ciena-cordigmp-1.0-SNAPSHOT.oar b/src/test/apps/ciena-cordigmp-1.0-SNAPSHOT.oar
new file mode 100644
index 0000000..7f1a19d
--- /dev/null
+++ b/src/test/apps/ciena-cordigmp-1.0-SNAPSHOT.oar
Binary files differ
diff --git a/src/test/apps/ciena-cordigmp/pom.xml b/src/test/apps/ciena-cordigmp/pom.xml
new file mode 100644
index 0000000..1776949
--- /dev/null
+++ b/src/test/apps/ciena-cordigmp/pom.xml
@@ -0,0 +1,159 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  ~ Copyright 2016 Open Networking Laboratory
+  ~
+  ~ Licensed under the Apache License, Version 2.0 (the "License");
+  ~ you may not use this file except in compliance with the License.
+  ~ You may obtain a copy of the License at
+  ~
+  ~     http://www.apache.org/licenses/LICENSE-2.0
+  ~
+  ~ Unless required by applicable law or agreed to in writing, software
+  ~ distributed under the License is distributed on an "AS IS" BASIS,
+  ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  ~ See the License for the specific language governing permissions and
+  ~ limitations under the License.
+  -->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>org.ciena.cordigmp</groupId>
+    <artifactId>ciena-cordigmp</artifactId>
+    <version>1.0-SNAPSHOT</version>
+    <packaging>bundle</packaging>
+
+    <description>Ciena CORD IGMP for OVS</description>
+    <url>http://onosproject.org</url>
+
+    <properties>
+        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
+        <onos.version>1.5.0</onos.version>
+        <onos.app.name>org.ciena.cordigmp</onos.app.name>
+        <onos.app.requires>org.onosproject.olt</onos.app.requires>
+        <onos.app.title>Ciena IGMP for OVS</onos.app.title>
+        <onos.app.origin>Ciena Inc.</onos.app.origin>
+        <onos.app.category>default</onos.app.category>
+        <onos.app.url>http://onosproject.org</onos.app.url>
+        <onos.app.readme>ONOS OSGi bundle archetype.</onos.app.readme>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>org.onosproject</groupId>
+            <artifactId>onos-api</artifactId>
+            <version>${onos.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.onosproject</groupId>
+            <artifactId>onlab-osgi</artifactId>
+            <version>${onos.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.12</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.onosproject</groupId>
+            <artifactId>onos-api</artifactId>
+            <version>${onos.version}</version>
+            <scope>test</scope>
+            <classifier>tests</classifier>
+        </dependency>
+
+        <dependency>
+            <groupId>org.apache.felix</groupId>
+            <artifactId>org.apache.felix.scr.annotations</artifactId>
+            <version>1.9.12</version>
+            <scope>provided</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.onosproject</groupId>
+            <artifactId>onos-cli</artifactId>
+            <version>${onos.version}</version>
+        </dependency>
+      <dependency>
+            <groupId>org.osgi</groupId>
+            <artifactId>org.osgi.compendium</artifactId>
+            <version>5.0.0</version>
+        </dependency>
+        <dependency>
+            <groupId>org.onosproject</groupId>
+            <artifactId>onos-app-olt-api</artifactId>
+            <version>${onos.version}</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.felix</groupId>
+                <artifactId>maven-bundle-plugin</artifactId>
+                <version>3.0.1</version>
+                <extensions>true</extensions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>2.5.1</version>
+                <configuration>
+                    <source>1.8</source>
+                    <target>1.8</target>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.felix</groupId>
+                <artifactId>maven-scr-plugin</artifactId>
+                <version>1.21.0</version>
+                <executions>
+                    <execution>
+                        <id>generate-scr-srcdescriptor</id>
+                        <goals>
+                            <goal>scr</goal>
+                        </goals>
+                    </execution>
+                </executions>
+                <configuration>
+                    <supportedProjectTypes>
+                        <supportedProjectType>bundle</supportedProjectType>
+                        <supportedProjectType>war</supportedProjectType>
+                    </supportedProjectTypes>
+                </configuration>
+            </plugin>
+            <plugin>
+                <groupId>org.onosproject</groupId>
+                <artifactId>onos-maven-plugin</artifactId>
+                <version>1.9</version>
+                <executions>
+                    <execution>
+                        <id>cfg</id>
+                        <phase>generate-resources</phase>
+                        <goals>
+                            <goal>cfg</goal>
+                        </goals>
+                    </execution>
+                    <execution>
+                        <id>swagger</id>
+                        <phase>generate-sources</phase>
+                        <goals>
+                            <goal>swagger</goal>
+                        </goals>
+                    </execution>
+                    <execution>
+                        <id>app</id>
+                        <phase>package</phase>
+                        <goals>
+                            <goal>app</goal>
+                        </goals>
+                    </execution>
+                </executions>
+            </plugin>
+        </plugins>
+    </build>
+
+</project>
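
This POM builds the app as an OSGi bundle, and the onos-maven-plugin app goal packages it into the .oar that is also checked in above (ciena-cordigmp-1.0-SNAPSHOT.oar). A rough sketch of building and loading it into ONOS, assuming the onos-app helper from the ONOS distribution is on the PATH; the controller address is a placeholder:

```
# Build the bundle and the .oar application package (requires Maven and JDK 8)
cd src/test/apps/ciena-cordigmp
mvn clean install

# Install and activate the app on a running ONOS instance (install! also activates)
onos-app 127.0.0.1 install! target/ciena-cordigmp-1.0-SNAPSHOT.oar
```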

diff --git a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/CordIgmp.java b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/CordIgmp.java
new file mode 100644
index 0000000..586854d
--- /dev/null
+++ b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/CordIgmp.java
@@ -0,0 +1,506 @@
+/*
+ * Copyright 2015-2016 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.ciena.cordigmp;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.node.ArrayNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.collect.Multiset;
+import com.google.common.collect.ConcurrentHashMultiset;
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.felix.scr.annotations.Activate;
+import org.apache.felix.scr.annotations.Component;
+import org.apache.felix.scr.annotations.Deactivate;
+import org.apache.felix.scr.annotations.Modified;
+import org.apache.felix.scr.annotations.Property;
+import org.apache.felix.scr.annotations.Reference;
+import org.apache.felix.scr.annotations.ReferenceCardinality;
+import org.onlab.packet.Ethernet;
+import org.onlab.packet.IpAddress;
+import org.onosproject.cfg.ComponentConfigService;
+import org.onosproject.codec.CodecService;
+import org.onosproject.core.ApplicationId;
+import org.onosproject.core.CoreService;
+import org.onosproject.net.ConnectPoint;
+import org.onosproject.net.DeviceId;
+import org.onosproject.net.config.ConfigFactory;
+import org.onosproject.net.config.NetworkConfigEvent;
+import org.onosproject.net.config.NetworkConfigListener;
+import org.onosproject.net.config.NetworkConfigRegistry;
+import org.onosproject.net.config.basics.SubjectFactories;
+import org.onosproject.net.flow.DefaultTrafficSelector;
+import org.onosproject.net.flow.DefaultTrafficTreatment;
+import org.onosproject.net.flow.TrafficTreatment;
+import org.onosproject.net.flow.TrafficSelector;
+import org.onosproject.net.device.DeviceEvent;
+import org.onosproject.net.device.DeviceListener;
+import org.onosproject.net.device.DeviceService;
+import org.onosproject.net.flow.instructions.Instructions;
+import org.onosproject.net.flow.FlowEntry;
+import org.onosproject.net.flow.DefaultFlowEntry;
+import org.onosproject.net.flow.FlowRuleService;
+import org.onosproject.net.flowobjective.DefaultNextObjective;
+import org.onosproject.net.flowobjective.FlowObjectiveService;
+import org.onosproject.net.flowobjective.NextObjective;
+import org.onosproject.net.flowobjective.Objective;
+import org.onosproject.net.flowobjective.ObjectiveContext;
+import org.onosproject.net.flowobjective.ObjectiveError;
+import org.onosproject.net.mcast.McastEvent;
+import org.onosproject.net.mcast.McastListener;
+import org.onosproject.net.mcast.McastRoute;
+import org.onosproject.net.mcast.McastRouteInfo;
+import org.onosproject.net.mcast.MulticastRouteService;
+import org.onosproject.olt.AccessDeviceConfig;
+import org.onosproject.olt.AccessDeviceData;
+import org.onosproject.rest.AbstractWebResource;
+import org.osgi.service.component.ComponentContext;
+import org.onosproject.net.PortNumber;
+import org.onlab.packet.IPv4;
+import org.slf4j.Logger;
+
+import java.io.IOException;
+import java.util.Dictionary;
+import java.util.List;
+import java.util.Map;
+import java.util.Collection;
+import java.util.Properties;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static com.google.common.base.Preconditions.checkNotNull;
+import static com.google.common.base.Strings.isNullOrEmpty;
+import static org.onlab.util.Tools.get;
+import static org.slf4j.LoggerFactory.getLogger;
+
+/**
+ * CORD multicast provisioning application. Operates by listening to
+ * events on the multicast rib and provisioning groups to program multicast
+ * flows on the dataplane.
+ */
+@Component(immediate = true)
+public class CordIgmp {
+
+
+    private static final int DEFAULT_REST_TIMEOUT_MS = 2000;
+    private static final int DEFAULT_PRIORITY = 500;
+    private static final short DEFAULT_MCAST_VLAN = 4000;
+    private static final String DEFAULT_SYNC_HOST = "localhost:8181";
+    private static final String DEFAULT_USER = "karaf";
+    private static final String DEFAULT_PASSWORD = "karaf";
+    private static final boolean DEFAULT_VLAN_ENABLED = true;
+    private static final short DEFAULT_INPUT_PORT = 2;
+    private static final short DEFAULT_OUTPUT_PORT = 1;
+    private final Logger log = getLogger(getClass());
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected MulticastRouteService mcastService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected FlowObjectiveService flowObjectiveService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected CoreService coreService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected CodecService codecService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected ComponentConfigService componentConfigService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected NetworkConfigRegistry networkConfig;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected FlowRuleService flowRuleService;
+
+    @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
+    protected DeviceService deviceService;
+
+    protected McastListener listener = new InternalMulticastListener();
+    private InternalNetworkConfigListener configListener =
+            new InternalNetworkConfigListener();
+    private DeviceListener deviceListener = new InternalDeviceListener();
+
+    //Map of IGMP groups to port
+    private Map<IpAddress, IgmpPortPair> cordIgmpTranslateTable = Maps.newConcurrentMap();
+
+    //Count of group joins
+    private Multiset<IpAddress> cordIgmpCountTable = ConcurrentHashMultiset.create();
+    
+    //TODO: move this to distributed atomic long
+    private AtomicInteger channels = new AtomicInteger(0);
+
+    private ApplicationId appId;
+
+    @Property(name = "mcastVlan", intValue = DEFAULT_MCAST_VLAN,
+            label = "VLAN for multicast traffic")
+    private int mcastVlan = DEFAULT_MCAST_VLAN;
+
+    @Property(name = "vlanEnabled", boolValue = DEFAULT_VLAN_ENABLED,
+            label = "Use vlan for multicast traffic?")
+    private boolean vlanEnabled = DEFAULT_VLAN_ENABLED;
+
+    @Property(name = "priority", intValue = DEFAULT_PRIORITY,
+            label = "Priority for multicast rules")
+    private int priority = DEFAULT_PRIORITY;
+
+    @Property(name = "syncHost", value = DEFAULT_SYNC_HOST,
+            label = "host:port to synchronize routes to")
+    private String syncHost = DEFAULT_SYNC_HOST;
+
+    @Property(name = "username", value = DEFAULT_USER,
+            label = "Username for REST password authentication")
+    private String user = DEFAULT_USER;
+
+    @Property(name = "password", value = DEFAULT_PASSWORD,
+            label = "Password for REST authentication")
+    private String password = DEFAULT_PASSWORD;
+
+    @Property(name = "inputPort", intValue = DEFAULT_INPUT_PORT,
+              label = "Input port for OVS multicast traffic")
+    private int inputPort = DEFAULT_INPUT_PORT;
+
+    @Property(name = "outputPort", intValue = DEFAULT_OUTPUT_PORT,
+              label = "Output port for OVS multicast traffic")
+    private int outputPort = DEFAULT_OUTPUT_PORT;
+
+    private String fabricOnosUrl;
+
+    private Map<DeviceId, AccessDeviceData> oltData = new ConcurrentHashMap<>();
+
+    private Map<DeviceId, Boolean> deviceAvailability = new ConcurrentHashMap<>();
+
+    private static final Class<CordIgmpTranslateConfig> CORD_IGMP_TRANSLATE_CONFIG_CLASS =
+            CordIgmpTranslateConfig.class;
+
+    private ConfigFactory<ApplicationId, CordIgmpTranslateConfig> cordIgmpTranslateConfigFactory =
+            new ConfigFactory<ApplicationId, CordIgmpTranslateConfig>(
+                    SubjectFactories.APP_SUBJECT_FACTORY, CORD_IGMP_TRANSLATE_CONFIG_CLASS, "cordIgmpTranslate", true) {
+                @Override
+                public CordIgmpTranslateConfig createConfig() {
+                    return new CordIgmpTranslateConfig();
+                }
+            };
+
+
+    @Activate
+    public void activate(ComponentContext context) {
+        componentConfigService.registerProperties(getClass());
+        modified(context);
+
+        appId = coreService.registerApplication("org.ciena.cordigmp");
+
+        networkConfig.registerConfigFactory(cordIgmpTranslateConfigFactory);
+        networkConfig.addListener(configListener);
+
+        networkConfig.getSubjects(DeviceId.class, AccessDeviceConfig.class).forEach(
+                subject -> {
+                    AccessDeviceConfig config = networkConfig.getConfig(subject, AccessDeviceConfig.class);
+                    if (config != null) {
+                        AccessDeviceData data = config.getOlt();
+                        oltData.put(data.deviceId(), data);
+                    }
+                }
+        );
+
+        CordIgmpTranslateConfig cordIgmpTranslateConfig = networkConfig.getConfig(appId, CordIgmpTranslateConfig.class);
+
+        if(cordIgmpTranslateConfig != null) {
+            Collection<McastPorts> translations = cordIgmpTranslateConfig.getCordIgmpTranslations();
+            for(McastPorts port: translations) {
+                cordIgmpTranslateTable.put(port.group(), 
+                                           port.portPair());
+            }
+        }
+
+        mcastService.addListener(listener);
+
+        mcastService.getRoutes().stream()
+                .map(r -> new ImmutablePair<>(r, mcastService.fetchSinks(r)))
+                .filter(pair -> pair.getRight() != null && !pair.getRight().isEmpty())
+                .forEach(pair -> pair.getRight().forEach(sink -> provisionGroup(pair.getLeft(),
+                                                                                sink)));
+
+        deviceService.addListener(deviceListener);
+
+        log.info("Started");
+    }
+
+    @Deactivate
+    public void deactivate() {
+        componentConfigService.unregisterProperties(getClass(), false);
+        deviceService.removeListener(deviceListener);
+        mcastService.removeListener(listener);
+        networkConfig.unregisterConfigFactory(cordIgmpTranslateConfigFactory);
+        networkConfig.removeListener(configListener);
+        deviceAvailability.clear();
+        log.info("Stopped");
+    }
+
+    @Modified
+    public void modified(ComponentContext context) {
+        Dictionary<?, ?> properties = context != null ? context.getProperties() : new Properties();
+
+        try {
+            String s = get(properties, "username");
+            user = isNullOrEmpty(s) ? DEFAULT_USER : s.trim();
+
+            s = get(properties, "password");
+            password = isNullOrEmpty(s) ? DEFAULT_PASSWORD : s.trim();
+
+            s = get(properties, "mcastVlan");
+            mcastVlan = isNullOrEmpty(s) ? DEFAULT_MCAST_VLAN : Short.parseShort(s.trim());
+
+            s = get(properties, "vlanEnabled");
+            vlanEnabled = isNullOrEmpty(s) ? DEFAULT_VLAN_ENABLED : Boolean.parseBoolean(s.trim());
+
+            s = get(properties, "priority");
+            priority = isNullOrEmpty(s) ? DEFAULT_PRIORITY : Integer.parseInt(s.trim());
+
+            s = get(properties, "syncHost");
+            syncHost = isNullOrEmpty(s) ? DEFAULT_SYNC_HOST : s.trim();
+            log.warn("Sync Host = " + syncHost);
+
+            s = get(properties, "inputPort");
+            inputPort = isNullOrEmpty(s) ? DEFAULT_INPUT_PORT : Short.parseShort(s.trim());
+
+            s = get(properties, "outputPort");
+            outputPort = isNullOrEmpty(s) ? DEFAULT_OUTPUT_PORT : Short.parseShort(s.trim());
+
+        } catch (Exception e) {
+            user = DEFAULT_USER;
+            password = DEFAULT_PASSWORD;
+            syncHost = DEFAULT_SYNC_HOST;
+            mcastVlan = DEFAULT_MCAST_VLAN;
+            vlanEnabled = false;
+            priority = DEFAULT_PRIORITY;
+            inputPort = DEFAULT_INPUT_PORT;
+            outputPort = DEFAULT_OUTPUT_PORT;
+        }
+        fabricOnosUrl = createRemoteUrl(syncHost);
+    }
+
+    private static String createRemoteUrl(String remoteHost) {
+        return "http://" + remoteHost + "/onos/v1/mcast";
+    }
+
+    private class InternalMulticastListener implements McastListener {
+        @Override
+        public void event(McastEvent event) {
+            McastRouteInfo info = event.subject();
+            switch (event.type()) {
+                case ROUTE_ADDED:
+                    break;
+                case ROUTE_REMOVED:
+                    break;
+                case SOURCE_ADDED:
+                    break;
+                case SINK_ADDED:
+                    if (!info.sink().isPresent()) {
+                        log.warn("No sink given after sink added event: {}", info);
+                        return;
+                    }
+                    provisionGroup(info.route(), info.sink().get());
+                    break;
+                case SINK_REMOVED:
+                    unprovisionGroup(event.subject());
+                    break;
+                default:
+                    log.warn("Unknown mcast event {}", event.type());
+            }
+        }
+    }
+    
+    private void provisionFilterIgmp(DeviceId devId, boolean remove) {
+        Boolean deviceStatus = deviceAvailability.get(devId);
+        if(deviceStatus != null) {
+            if(remove == false) {
+                return;
+            }
+        } else if(remove == true) {
+            return;
+        }
+        TrafficSelector.Builder igmp = DefaultTrafficSelector.builder()
+            .matchEthType(Ethernet.TYPE_IPV4)
+            .matchIPProtocol(IPv4.PROTOCOL_IGMP);
+        TrafficTreatment.Builder treatment = DefaultTrafficTreatment.builder()
+            .setOutput(PortNumber.CONTROLLER);
+        FlowEntry.Builder flowEntry = DefaultFlowEntry.builder();
+        flowEntry.forDevice(devId);
+        flowEntry.withPriority(priority);
+        flowEntry.withSelector(igmp.build());
+        flowEntry.withTreatment(treatment.build());
+        flowEntry.fromApp(appId);
+        flowEntry.makePermanent();
+        if(remove == false) {
+            deviceAvailability.put(devId, true);
+            flowRuleService.applyFlowRules(flowEntry.build());
+        } else {
+            deviceAvailability.remove(devId);
+            flowRuleService.removeFlowRules(flowEntry.build());
+        }
+        log.warn("IGMP flow rule " + ( remove ? "removed" : "added" ) + " for device id " + devId);
+    }
+
+    private class InternalDeviceListener implements DeviceListener {
+        @Override
+        public void event(DeviceEvent event) {
+            DeviceId devId = event.subject().id();
+            switch (event.type()) {
+
+                case DEVICE_ADDED:
+                case DEVICE_UPDATED:
+                    provisionFilterIgmp(devId, false);
+                    break;
+                case DEVICE_AVAILABILITY_CHANGED:
+                    if(deviceService.isAvailable(devId)) {
+                        provisionFilterIgmp(devId, false);
+                    } else {
+                        provisionFilterIgmp(devId, true);
+                    }
+                    break;
+                case DEVICE_REMOVED:
+                case DEVICE_SUSPENDED:
+                    provisionFilterIgmp(devId, true);
+                    break;
+                case PORT_STATS_UPDATED:
+                case PORT_ADDED:
+                case PORT_UPDATED:
+                case PORT_REMOVED:
+                    //log.debug("Got event " + event.type() + " for device " + devId);
+                    break;
+                default:
+                    log.warn("Unknown device event {}", event.type());
+                    break;
+            }
+        }
+    }
+
+    private void unprovisionGroup(McastRouteInfo info) {
+        if (!info.sink().isPresent()) {
+            log.warn("No sink given after sink removed event: {}", info);
+            return;
+        }
+        ConnectPoint loc = info.sink().get();
+        AccessDeviceData oltInfo = oltData.get(loc.deviceId());
+        if(oltInfo != null) {
+            log.warn("Ignoring deprovisioning mcast route for OLT device: " + loc.deviceId());
+            return;
+        }
+        final IgmpPortPair portPair = cordIgmpTranslateTable.get(info.route().group());
+        if(portPair == null) {
+            log.warn("Ignoring unprovisioning for group " + info.route().group() + " with no port map");
+            return;
+        }
+        if(cordIgmpCountTable.remove(info.route().group(), 1) <= 1) {
+            //Remove flow for last channel leave
+            final PortNumber inPort = PortNumber.portNumber(portPair.inputPort());
+            final PortNumber outPort = PortNumber.portNumber(portPair.outputPort());
+            TrafficSelector.Builder mcast = DefaultTrafficSelector.builder()
+                .matchInPort(inPort)
+                .matchEthType(Ethernet.TYPE_IPV4)
+                .matchIPDst(info.route().group().toIpPrefix());
+            TrafficTreatment.Builder treatment = DefaultTrafficTreatment.builder();
+            FlowEntry.Builder flowEntry = DefaultFlowEntry.builder();
+            treatment.add(Instructions.createOutput(outPort));
+            flowEntry.forDevice(loc.deviceId());
+            flowEntry.withPriority(priority);
+            flowEntry.withSelector(mcast.build());
+            flowEntry.withTreatment(treatment.build());
+            flowEntry.fromApp(appId);
+            flowEntry.makePermanent();
+            flowRuleService.removeFlowRules(flowEntry.build());
+            log.warn("Flow rule removed for for device id " + loc.deviceId());
+        }
+    }
+
+    private void provisionGroup(McastRoute route, ConnectPoint sink) {
+        checkNotNull(route, "Route cannot be null");
+        checkNotNull(sink, "Sink cannot be null");
+
+        AccessDeviceData oltInfo = oltData.get(sink.deviceId());
+        if(oltInfo != null) {
+            log.warn("Ignoring provisioning mcast route for OLT device: " + sink.deviceId());
+            return;
+        } 
+        final IgmpPortPair portPair = cordIgmpTranslateTable.get(route.group());
+        if(portPair == null) {
+            log.warn("Ports for Group " + route.group() + " not found in cord igmp map. Skipping provisioning.");
+            return;
+        }
+        if(cordIgmpCountTable.count(route.group()) == 0) {
+            //First group entry. Provision the flows
+            final PortNumber inPort = PortNumber.portNumber(portPair.inputPort());
+            final PortNumber outPort = PortNumber.portNumber(portPair.outputPort());
+            TrafficSelector.Builder mcast = DefaultTrafficSelector.builder()
+                    .matchInPort(inPort)
+                    .matchEthType(Ethernet.TYPE_IPV4)
+                    .matchIPDst(route.group().toIpPrefix());
+            TrafficTreatment.Builder treatment = DefaultTrafficTreatment.builder();
+            FlowEntry.Builder flowEntry = DefaultFlowEntry.builder();
+            treatment.add(Instructions.createOutput(outPort));
+            flowEntry.forDevice(sink.deviceId());
+            flowEntry.withPriority(priority);
+            flowEntry.withSelector(mcast.build());
+            flowEntry.withTreatment(treatment.build());
+            flowEntry.fromApp(appId);
+            flowEntry.makePermanent();
+            flowRuleService.applyFlowRules(flowEntry.build());
+            log.warn("Flow rules applied for device id " + sink.deviceId());
+        }
+        cordIgmpCountTable.add(route.group());
+    }
+
+    private class InternalNetworkConfigListener implements NetworkConfigListener {
+        @Override
+        public void event(NetworkConfigEvent event) {
+            switch (event.type()) {
+
+                case CONFIG_ADDED:
+                case CONFIG_UPDATED:
+                    if (event.configClass().equals(CORD_IGMP_TRANSLATE_CONFIG_CLASS)) {
+                        CordIgmpTranslateConfig config =
+                                networkConfig.getConfig((ApplicationId) event.subject(),
+                                        CORD_IGMP_TRANSLATE_CONFIG_CLASS);
+                        if (config != null) {
+                            cordIgmpTranslateTable.clear();
+                            cordIgmpCountTable.clear();
+                            config.getCordIgmpTranslations().forEach(
+                                                                     mcastPorts -> cordIgmpTranslateTable.put(mcastPorts.group(), mcastPorts.portPair()));
+                        }
+                    }
+                    break;
+                case CONFIG_REGISTERED:
+                case CONFIG_UNREGISTERED:
+                case CONFIG_REMOVED:
+                    break;
+                default:
+                    break;
+            }
+        }
+
+        //@Override
+        //public boolean isRelevant(NetworkConfigEvent event) {
+        //    return event.configClass().equals(CORD_IGMP_TRANSLATE_CONFIG_CLASS);
+        //}
+
+
+    }
+
+}
diff --git a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/CordIgmpTranslateConfig.java b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/CordIgmpTranslateConfig.java
new file mode 100644
index 0000000..e55cf11
--- /dev/null
+++ b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/CordIgmpTranslateConfig.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2016 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.ciena.cordigmp;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+import org.onlab.packet.IpAddress;
+import org.onosproject.core.ApplicationId;
+import org.onosproject.net.config.Config;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * IGMP SSM translate configuration.
+ */
+public class CordIgmpTranslateConfig extends Config<ApplicationId> {
+
+    private static final String GROUP = "group";
+    private static final String INPUT_PORT = "inputPort";
+    private static final String OUTPUT_PORT = "outputPort";
+
+    @Override
+    public boolean isValid() {
+        for (JsonNode node : array) {
+            if (!hasOnlyFields((ObjectNode) node, GROUP, INPUT_PORT, OUTPUT_PORT)) {
+                return false;
+            }
+
+            if (!(isIpAddress((ObjectNode) node, GROUP, FieldPresence.MANDATORY) && 
+                  node.get(INPUT_PORT).isInt() && node.get(OUTPUT_PORT).isInt())) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    /**
+     * Gets the list of CordIgmp translations.
+     *
+     * @return CordIgmp translations
+     */
+    public List<McastPorts> getCordIgmpTranslations() {
+        List<McastPorts> translations = new ArrayList();
+        for (JsonNode node : array) {
+            translations.add(
+                    new McastPorts(
+                            IpAddress.valueOf(node.path(GROUP).asText().trim()),
+                            Integer.valueOf(node.path(INPUT_PORT).asText().trim()),
+                            Integer.valueOf(node.path(OUTPUT_PORT).asText().trim())));
+        }
+        return translations;
+    }
+}
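
CordIgmpTranslateConfig expects an array of objects with group, inputPort, and outputPort fields, registered under the cordIgmpTranslate key for the org.ciena.cordigmp application (see the ConfigFactory in CordIgmp.java above). A hypothetical way to push such a translation table through ONOS's network configuration REST API; the multicast group, port numbers, and karaf credentials are placeholders:

```
curl -u karaf:karaf -X POST -H "Content-Type: application/json" \
     http://localhost:8181/onos/v1/network/configuration/ -d '{
  "apps": {
    "org.ciena.cordigmp": {
      "cordIgmpTranslate": [
        { "group": "224.0.1.10", "inputPort": 2, "outputPort": 1 }
      ]
    }
  }
}'
```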
diff --git a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/IgmpPortPair.java b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/IgmpPortPair.java
new file mode 100644
index 0000000..176cac9
--- /dev/null
+++ b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/IgmpPortPair.java
@@ -0,0 +1,20 @@
+package org.ciena.cordigmp;
+
+public class IgmpPortPair {
+    private final Integer inputPort;
+    private final Integer outputPort;
+
+    public IgmpPortPair(Integer inputPort, Integer outputPort) {
+        this.inputPort = inputPort;
+        this.outputPort = outputPort;
+    }
+
+    public Integer inputPort() {
+        return inputPort;
+    }
+
+    public Integer outputPort() {
+        return outputPort;
+    }
+}
+
diff --git a/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/McastPorts.java b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/McastPorts.java
new file mode 100644
index 0000000..e0cd622
--- /dev/null
+++ b/src/test/apps/ciena-cordigmp/src/main/java/org/ciena/cordigmp/McastPorts.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2015 Open Networking Laboratory
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.ciena.cordigmp;
+
+import com.google.common.annotations.Beta;
+import com.google.common.base.Objects;
+import org.onlab.packet.IpAddress;
+
+import static com.google.common.base.MoreObjects.toStringHelper;
+import static com.google.common.base.Preconditions.checkNotNull;
+
+/**
+ * An entity representing a multicast group and its input and output ports.
+ */
+@Beta
+public class McastPorts {
+
+    private final IpAddress group;
+    private final IgmpPortPair portPair;
+
+    public McastPorts(IpAddress group, Integer inputPort, Integer outputPort) {
+        checkNotNull(group, "Multicast route must specify a group address");
+        checkNotNull(inputPort, "Must indicate input port");
+        checkNotNull(outputPort, "Must indicate output port");
+        this.group = group;
+        this.portPair = new IgmpPortPair(inputPort, outputPort);
+    }
+
+    /**
+     * Fetches the group address of this route.
+     *
+     * @return an ip address
+     */
+    public IpAddress group() {
+        return group;
+    }
+
+    public Integer inputPort() {
+        return portPair.inputPort();
+    }
+
+    public Integer outputPort() {
+        return portPair.outputPort();
+    }
+
+    public IgmpPortPair portPair() {
+        return portPair;
+    }
+
+    @Override
+    public String toString() {
+        return toStringHelper(this)
+                .add("group", group)
+                .add("inputPort", inputPort())
+                .add("outputPort", outputPort())
+                .toString();
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        McastPorts that = (McastPorts) o;
+        return Objects.equal(group, that.group) &&
+               Objects.equal(inputPort(), that.inputPort()) &&
+               Objects.equal(outputPort(), that.outputPort());
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hashCode(group, inputPort(), outputPort());
+    }
+
+}
diff --git a/src/test/builder/buildFsm.sh b/src/test/builder/buildFsm.sh
index 87bc14f..cba0577 100644
--- a/src/test/builder/buildFsm.sh
+++ b/src/test/builder/buildFsm.sh
@@ -12,9 +12,9 @@
 
 
 ##Generate DNS test state machine
-python yamlFsm.py -p DnsHolder -f noseDnsTest.yaml > ${odir}/noseDnsHolder.py
+#python yamlFsm.py -p DnsHolder -f noseDnsTest.yaml > ${odir}/noseDnsHolder.py
 
 #Generate EAP MD5 authentication state machine
-python yamlFsm.py -p Md5AuthHolder -f noseMd5AuthTest.yaml > ${odir}/noseMd5AuthHolder.py
+python yamlFsm.py -p Md5AuthHolder -f noseMD5AuthTest.yaml > ${odir}/noseMd5AuthHolder.py
 
 
diff --git a/src/test/builder/noseDnsTest.yaml b/src/test/builder/noseDnsTest.yaml
new file mode 100644
index 0000000..0b85b4e
--- /dev/null
+++ b/src/test/builder/noseDnsTest.yaml
@@ -0,0 +1,9 @@
+States:
+    ST_DNS_SND_REC:
+        Events:
+            EVT_DNS_SND_REC:
+                Actions:
+                    - _dns_snd_rec 
+                NextState: ST_DNS_FINAL
+        
+
diff --git a/src/test/builder/noseMD5AuthTest.yaml b/src/test/builder/noseMD5AuthTest.yaml
new file mode 100644
index 0000000..7d43175
--- /dev/null
+++ b/src/test/builder/noseMD5AuthTest.yaml
@@ -0,0 +1,32 @@
+States:
+    ST_EAP_SETUP:
+        Events:
+            EVT_EAP_SETUP:
+                Actions:
+                    - _eapSetup
+                NextState: ST_EAP_START
+    ST_EAP_START:
+        Events:
+            EVT_EAP_START:
+                Actions:
+                    - _eapStart
+                NextState: ST_EAP_ID_REQ
+    ST_EAP_ID_REQ:
+        Events:
+            EVT_EAP_ID_REQ:
+                Actions:
+                    - _eapIdReq
+                NextState: ST_EAP_MD5_CHALLENGE
+    ST_EAP_MD5_CHALLENGE:
+        Events:
+            EVT_EAP_MD5_CHALLENGE:
+                Actions:
+                    - _eapMd5Challenge 
+                NextState: ST_EAP_STATUS
+    ST_EAP_STATUS:
+        Events:
+            EVT_EAP_STATUS:
+                Actions:
+                    - _eapStatus
+                NextState: ST_EAP_MD5_DONE
+
diff --git a/src/test/builder/noseTlsAuthTest.yaml b/src/test/builder/noseTlsAuthTest.yaml
index 5a7f828..dfdd6b4 100644
--- a/src/test/builder/noseTlsAuthTest.yaml
+++ b/src/test/builder/noseTlsAuthTest.yaml
@@ -28,5 +28,17 @@
             EVT_EAP_TLS_CERT_REQ:
                 Actions:
                     - _eapTlsCertReq
+                NextState: ST_EAP_TLS_CHANGE_CIPHER_SPEC
+    ST_EAP_TLS_CHANGE_CIPHER_SPEC:
+        Events:
+            EVT_EAP_TLS_CHANGE_CIPHER_SPEC:
+                Actions:
+                    - _eapTlsChangeCipherSpec
+                NextState: ST_EAP_TLS_FINISHED
+    ST_EAP_TLS_FINISHED:
+        Events:
+            EVT_EAP_TLS_FINISHED:
+                Actions:
+                    - _eapTlsFinished
                 NextState: ST_EAP_TLS_DONE
         
\ No newline at end of file
diff --git a/src/test/cli/__init__.py b/src/test/cli/__init__.py
new file mode 100644
index 0000000..900be31
--- /dev/null
+++ b/src/test/cli/__init__.py
@@ -0,0 +1,5 @@
+import os,sys
+##add the python path to lookup the utils
+working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
+utils_dir = os.path.join(working_dir, '../utils')
+__path__.append(utils_dir)
diff --git a/src/test/cli/ast.py b/src/test/cli/ast.py
new file mode 100644
index 0000000..fd5dfdb
--- /dev/null
+++ b/src/test/cli/ast.py
@@ -0,0 +1,311 @@
+# -*- coding: utf-8 -*-
+"""
+    ast
+    ~~~
+
+    The `ast` module helps Python applications to process trees of the Python
+    abstract syntax grammar.  The abstract syntax itself might change with
+    each Python release; this module helps to find out programmatically what
+    the current grammar looks like and allows modifications of it.
+
+    An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
+    a flag to the `compile()` builtin function or by using the `parse()`
+    function from this module.  The result will be a tree of objects whose
+    classes all inherit from `ast.AST`.
+
+    A modified abstract syntax tree can be compiled into a Python code object
+    using the built-in `compile()` function.
+
+    Additionally various helper functions are provided that make working with
+    the trees simpler.  The main intention of the helper functions and this
+    module in general is to provide an easy to use interface for libraries
+    that work tightly with the python syntax (template engines for example).
+
+
+    :copyright: Copyright 2008 by Armin Ronacher.
+    :license: Python License.
+"""
+from _ast import *
+from _ast import __version__
+
+
+def parse(source, filename='<unknown>', mode='exec'):
+    """
+    Parse the source into an AST node.
+    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
+    """
+    return compile(source, filename, mode, PyCF_ONLY_AST)
+
+
+def literal_eval(node_or_string):
+    """
+    Safely evaluate an expression node or a string containing a Python
+    expression.  The string or node provided may only consist of the following
+    Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
+    and None.
+    """
+    _safe_names = {'None': None, 'True': True, 'False': False}
+    if isinstance(node_or_string, basestring):
+        node_or_string = parse(node_or_string, mode='eval')
+    if isinstance(node_or_string, Expression):
+        node_or_string = node_or_string.body
+    def _convert(node):
+        if isinstance(node, Str):
+            return node.s
+        elif isinstance(node, Num):
+            return node.n
+        elif isinstance(node, Tuple):
+            return tuple(map(_convert, node.elts))
+        elif isinstance(node, List):
+            return list(map(_convert, node.elts))
+        elif isinstance(node, Dict):
+            return dict((_convert(k), _convert(v)) for k, v
+                        in zip(node.keys, node.values))
+        elif isinstance(node, Name):
+            if node.id in _safe_names:
+                return _safe_names[node.id]
+        elif isinstance(node, BinOp) and \
+             isinstance(node.op, (Add, Sub)) and \
+             isinstance(node.right, Num) and \
+             isinstance(node.right.n, complex) and \
+             isinstance(node.left, Num) and \
+             isinstance(node.left.n, (int, long, float)):
+            left = node.left.n
+            right = node.right.n
+            if isinstance(node.op, Add):
+                return left + right
+            else:
+                return left - right
+        raise ValueError('malformed string')
+    return _convert(node_or_string)
+
+
+def dump(node, annotate_fields=True, include_attributes=False):
+    """
+    Return a formatted dump of the tree in *node*.  This is mainly useful for
+    debugging purposes.  The returned string will show the names and the values
+    for fields.  This makes the code impossible to evaluate, so if evaluation is
+    wanted *annotate_fields* must be set to False.  Attributes such as line
+    numbers and column offsets are not dumped by default.  If this is wanted,
+    *include_attributes* can be set to True.
+    """
+    def _format(node):
+        if isinstance(node, AST):
+            fields = [(a, _format(b)) for a, b in iter_fields(node)]
+            rv = '%s(%s' % (node.__class__.__name__, ', '.join(
+                ('%s=%s' % field for field in fields)
+                if annotate_fields else
+                (b for a, b in fields)
+            ))
+            if include_attributes and node._attributes:
+                rv += fields and ', ' or ' '
+                rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
+                                for a in node._attributes)
+            return rv + ')'
+        elif isinstance(node, list):
+            return '[%s]' % ', '.join(_format(x) for x in node)
+        return repr(node)
+    if not isinstance(node, AST):
+        raise TypeError('expected AST, got %r' % node.__class__.__name__)
+    return _format(node)
+
+
+def copy_location(new_node, old_node):
+    """
+    Copy source location (`lineno` and `col_offset` attributes) from
+    *old_node* to *new_node* if possible, and return *new_node*.
+    """
+    for attr in 'lineno', 'col_offset':
+        if attr in old_node._attributes and attr in new_node._attributes \
+           and hasattr(old_node, attr):
+            setattr(new_node, attr, getattr(old_node, attr))
+    return new_node
+
+
+def fix_missing_locations(node):
+    """
+    When you compile a node tree with compile(), the compiler expects lineno and
+    col_offset attributes for every node that supports them.  This is rather
+    tedious to fill in for generated nodes, so this helper adds these attributes
+    recursively where not already set, by setting them to the values of the
+    parent node.  It works recursively starting at *node*.
+    """
+    def _fix(node, lineno, col_offset):
+        if 'lineno' in node._attributes:
+            if not hasattr(node, 'lineno'):
+                node.lineno = lineno
+            else:
+                lineno = node.lineno
+        if 'col_offset' in node._attributes:
+            if not hasattr(node, 'col_offset'):
+                node.col_offset = col_offset
+            else:
+                col_offset = node.col_offset
+        for child in iter_child_nodes(node):
+            _fix(child, lineno, col_offset)
+    _fix(node, 1, 0)
+    return node
+
+
+def increment_lineno(node, n=1):
+    """
+    Increment the line number of each node in the tree starting at *node* by *n*.
+    This is useful to "move code" to a different location in a file.
+    """
+    for child in walk(node):
+        if 'lineno' in child._attributes:
+            child.lineno = getattr(child, 'lineno', 0) + n
+    return node
+
+
+def iter_fields(node):
+    """
+    Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
+    that is present on *node*.
+    """
+    for field in node._fields:
+        try:
+            yield field, getattr(node, field)
+        except AttributeError:
+            pass
+
+
+def iter_child_nodes(node):
+    """
+    Yield all direct child nodes of *node*, that is, all fields that are nodes
+    and all items of fields that are lists of nodes.
+    """
+    for name, field in iter_fields(node):
+        if isinstance(field, AST):
+            yield field
+        elif isinstance(field, list):
+            for item in field:
+                if isinstance(item, AST):
+                    yield item
+
+
+def get_docstring(node, clean=True):
+    """
+    Return the docstring for the given node or None if no docstring can
+    be found.  If the node provided does not have docstrings a TypeError
+    will be raised.
+    """
+    if not isinstance(node, (FunctionDef, ClassDef, Module)):
+        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
+    if node.body and isinstance(node.body[0], Expr) and \
+       isinstance(node.body[0].value, Str):
+        if clean:
+            import inspect
+            return inspect.cleandoc(node.body[0].value.s)
+        return node.body[0].value.s
+
+
+def walk(node):
+    """
+    Recursively yield all descendant nodes in the tree starting at *node*
+    (including *node* itself), in no specified order.  This is useful if you
+    only want to modify nodes in place and don't care about the context.
+    """
+    from collections import deque
+    todo = deque([node])
+    while todo:
+        node = todo.popleft()
+        todo.extend(iter_child_nodes(node))
+        yield node
+
+
+class NodeVisitor(object):
+    """
+    A node visitor base class that walks the abstract syntax tree and calls a
+    visitor function for every node found.  This function may return a value
+    which is forwarded by the `visit` method.
+
+    This class is meant to be subclassed, with the subclass adding visitor
+    methods.
+
+    Per default the visitor functions for the nodes are ``'visit_'`` +
+    class name of the node.  So a `TryFinally` node visit function would
+    be `visit_TryFinally`.  This behavior can be changed by overriding
+    the `visit` method.  If no visitor function exists for a node
+    (return value `None`) the `generic_visit` visitor is used instead.
+
+    Don't use the `NodeVisitor` if you want to apply changes to nodes during
+    traversing.  For this a special visitor exists (`NodeTransformer`) that
+    allows modifications.
+    """
+
+    def visit(self, node):
+        """Visit a node."""
+        method = 'visit_' + node.__class__.__name__
+        visitor = getattr(self, method, self.generic_visit)
+        return visitor(node)
+
+    def generic_visit(self, node):
+        """Called if no explicit visitor function exists for a node."""
+        for field, value in iter_fields(node):
+            if isinstance(value, list):
+                for item in value:
+                    if isinstance(item, AST):
+                        self.visit(item)
+            elif isinstance(value, AST):
+                self.visit(value)
+
+
+class NodeTransformer(NodeVisitor):
+    """
+    A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
+    allows modification of nodes.
+
+    The `NodeTransformer` will walk the AST and use the return value of the
+    visitor methods to replace or remove the old node.  If the return value of
+    the visitor method is ``None``, the node will be removed from its location,
+    otherwise it is replaced with the return value.  The return value may be the
+    original node in which case no replacement takes place.
+
+    Here is an example transformer that rewrites all occurrences of name lookups
+    (``foo``) to ``data['foo']``::
+
+       class RewriteName(NodeTransformer):
+
+           def visit_Name(self, node):
+               return copy_location(Subscript(
+                   value=Name(id='data', ctx=Load()),
+                   slice=Index(value=Str(s=node.id)),
+                   ctx=node.ctx
+               ), node)
+
+    Keep in mind that if the node you're operating on has child nodes you must
+    either transform the child nodes yourself or call the :meth:`generic_visit`
+    method for the node first.
+
+    For nodes that were part of a collection of statements (that applies to all
+    statement nodes), the visitor may also return a list of nodes rather than
+    just a single node.
+
+    Usually you use the transformer like this::
+
+       node = YourTransformer().visit(node)
+    """
+
+    def generic_visit(self, node):
+        for field, old_value in iter_fields(node):
+            old_value = getattr(node, field, None)
+            if isinstance(old_value, list):
+                new_values = []
+                for value in old_value:
+                    if isinstance(value, AST):
+                        value = self.visit(value)
+                        if value is None:
+                            continue
+                        elif not isinstance(value, AST):
+                            new_values.extend(value)
+                            continue
+                    new_values.append(value)
+                old_value[:] = new_values
+            elif isinstance(old_value, AST):
+                new_node = self.visit(old_value)
+                if new_node is None:
+                    delattr(node, field)
+                else:
+                    setattr(node, field, new_node)
+        return node
diff --git a/src/test/cli/clicommon.py b/src/test/cli/clicommon.py
new file mode 100644
index 0000000..9110fdf
--- /dev/null
+++ b/src/test/cli/clicommon.py
@@ -0,0 +1,21 @@
+import os,sys
+from utilities import Utilities, utilities
+from scapy.all import *
+
+#log.setLevel('INFO')
+class MAIN(object):
+    def __init__(self):
+        global utilities
+        self.log = log
+        self.logdir = os.getenv('HOME')
+        self.logHeader = ''
+        self.utilities = utilities
+        self.TRUE = True
+        self.FALSE = False
+        self.EXPERIMENTAL_MODE = self.FALSE
+
+    def cleanup(self): pass
+
+    def exit(self): pass
+
+main = MAIN()
diff --git a/src/test/cli/clidriver.py b/src/test/cli/clidriver.py
new file mode 100644
index 0000000..ccb27a0
--- /dev/null
+++ b/src/test/cli/clidriver.py
@@ -0,0 +1,353 @@
+#!/usr/bin/env python
+"""
+Created on 24-Oct-2012
+
+author:s: Anil Kumar ( anilkumar.s@paxterrasolutions.com ),
+          Raghav Kashyap( raghavkashyap@paxterrasolutions.com )
+
+
+    TestON is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 2 of the License, or
+    ( at your option ) any later version.
+
+    TestON is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+
+
+
+"""
+import pexpect
+import re
+from component import Component
+from clicommon import *
+import os
+
+class CLI( Component ):
+
+    """
+        This will define common functions for CLI included.
+    """
+    def __init__( self ):
+        super( Component, self ).__init__()
+
+    def connect( self, **connectargs ):
+        """
+           Connection will establish to the remote host using ssh.
+           It will take user_name ,ip_address and password as arguments<br>
+           and will return the handle.
+        """
+        for key in connectargs:
+            vars( self )[ key ] = connectargs[ key ]
+
+        connect_result = super( CLI, self ).connect()
+        ssh_newkey = 'Are you sure you want to continue connecting'
+        refused = "ssh: connect to host " + \
+            self.ip_address + " port 22: Connection refused"
+        if self.port:
+            ssh_hosts_file = os.path.join(os.getenv('HOME'), '.ssh', 'known_hosts')
+            cmd_host_remove = 'ssh-keygen -f "%s" -R [%s]:8101' %(ssh_hosts_file, self.ip_address)
+            os.system(cmd_host_remove)
+            #main.log.info('SSH host remove cmd: %s' %cmd_host_remove)
+            main.log.info('Spawning pexpect for ip %s' %self.ip_address)
+            self.handle = pexpect.spawn(
+                'ssh -p ' +
+                self.port +
+                ' ' +
+                '-o StrictHostKeyChecking=no ' + 
+                self.user_name +
+                '@' +
+                self.ip_address,
+                env={ "TERM": "xterm-mono" },
+                maxread=50000 )
+        else:
+            self.handle = pexpect.spawn(
+                'ssh -X ' +
+                self.user_name +
+                '@' +
+                self.ip_address,
+                env={ "TERM": "xterm-mono" },
+                maxread=1000000,
+                timeout=60 )
+
+        self.handle.logfile = self.logfile_handler
+        i = 5
+        while i == 5:
+            i = self.handle.expect( [
+                                    ssh_newkey,
+                                    'password:|Password:',
+                                    pexpect.EOF,
+                                    pexpect.TIMEOUT,
+                                    refused,
+                                    'teston>',
+                                    '>|#|\$' ],
+                            120 )
+            if i == 0:  # Accept key, then expect either a password prompt or access
+                main.log.info( "ssh key confirmation received, send yes" )
+                self.handle.sendline( 'yes' )
+                i = 5  # Run the loop again
+                continue
+            if i == 1:  # Password required
+                if self.pwd:
+                    main.log.info(
+                    "ssh connection asked for password, gave password" )
+                else:
+                    main.log.info( "Server asked for password, but none was "
+                                    "given in the .topo file. Trying "
+                                    "no password.")
+                    self.pwd = ""
+                self.handle.sendline( self.pwd )
+                j = self.handle.expect( [
+                                        '>|#|\$',
+                                        'password:|Password:',
+                                        pexpect.EOF,
+                                        pexpect.TIMEOUT ],
+                                        120 )
+                if j != 0:
+                    main.log.error( "Incorrect Password" )
+                    return main.FALSE
+            elif i == 2:
+                main.log.error( "Connection timeout" )
+                return main.FALSE
+            elif i == 3:  # timeout
+                main.log.error(
+                    "No route to the Host " +
+                    self.user_name +
+                    "@" +
+                    self.ip_address )
+                return main.FALSE
+            elif i == 4:
+                main.log.error(
+                    "ssh: connect to host " +
+                    self.ip_address +
+                    " port 22: Connection refused" )
+                return main.FALSE
+            elif i == 6:
+                main.log.info( "Password not required, logged in" )
+
+        self.handle.sendline( "" )
+        self.handle.expect( '>|#|\$' )
+        return self.handle
+
+    def disconnect( self ):
+        result = super( CLI, self ).disconnect( self )
+        result = main.TRUE
+        # self.execute( cmd="exit",timeout=120,prompt="(.*)" )
+
+    def execute( self, **execparams ):
+        """
+        Facilitates command line execution of a given command. Arguments:
+        cmd => the command to be executed,
+        prompt => the expected command prompt or output,
+        timeout => timeout for the command execution,
+        more => key press to send when the output is paginated ( '--More--' ).
+
+        It returns the output of the command execution.
+        """
+        result = super( CLI, self ).execute( self )
+        defaultPrompt = '.*[$>\#]'
+        args = utilities.parse_args( [ "CMD",
+                                       "TIMEOUT",
+                                       "PROMPT",
+                                       "MORE" ],
+                                     **execparams )
+
+        expectPrompt = args[ "PROMPT" ] if args[ "PROMPT" ] else defaultPrompt
+        self.LASTRSP = ""
+        timeoutVar = args[ "TIMEOUT" ] if args[ "TIMEOUT" ] else 10
+        cmd = ''
+        if args[ "CMD" ]:
+            cmd = args[ "CMD" ]
+        else:
+            return 0
+        if args[ "MORE" ] is None:
+            args[ "MORE" ] = " "
+        self.handle.sendline( cmd )
+        self.lastCommand = cmd
+        index = self.handle.expect( [ expectPrompt,
+                                      "--More--",
+                                      'Command not found.',
+                                      pexpect.TIMEOUT,
+                                      "^:$" ],
+                                    timeout=timeoutVar )
+        if index == 0:
+            self.LASTRSP = self.LASTRSP + \
+                self.handle.before + self.handle.after
+            main.log.info( "Executed :" + str(cmd ) +
+                           " \t\t Expected Prompt '" + str( expectPrompt) +
+                           "' Found" )
+        elif index == 1:
+            self.LASTRSP = self.LASTRSP + self.handle.before
+            self.handle.send( args[ "MORE" ] )
+            main.log.info(
+                "Found More screen to go , Sending a key to proceed" )
+            indexMore = self.handle.expect(
+                [ "--More--", expectPrompt ], timeout=timeoutVar )
+            while indexMore == 0:
+                main.log.info(
+                    "Found another More screen to go, Sending a key to proceed" )
+                self.handle.send( args[ "MORE" ] )
+                indexMore = self.handle.expect(
+                    [ "--More--", expectPrompt ], timeout=timeoutVar )
+                self.LASTRSP = self.LASTRSP + self.handle.before
+        elif index == 2:
+            main.log.error( "Command not found" )
+            self.LASTRSP = self.LASTRSP + self.handle.before
+        elif index == 3:
+            main.log.error( "Expected Prompt not found, Time Out!!" )
+            main.log.error( expectPrompt )
+            self.LASTRSP = self.LASTRSP + self.handle.before
+            return self.LASTRSP
+        elif index == 4:
+            self.LASTRSP = self.LASTRSP + self.handle.before
+            # self.handle.send( args[ "MORE" ] )
+            self.handle.sendcontrol( "D" )
+            main.log.info(
+                "Found More screen to go, Sending a key to proceed" )
+            indexMore = self.handle.expect(
+                [ "^:$", expectPrompt ], timeout=timeoutVar )
+            while indexMore == 0:
+                main.log.info(
+                    "Found another More screen to go, Sending a key to proceed" )
+                self.handle.sendcontrol( "D" )
+                indexMore = self.handle.expect(
+                    [ "^:$", expectPrompt ], timeout=timeoutVar )
+                self.LASTRSP = self.LASTRSP + self.handle.before
+        main.last_response = self.remove_contol_chars( self.LASTRSP )
+        return self.LASTRSP
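
Putting the keyword arguments above together, a hedged usage sketch: `cli` is assumed to be a connected CLI instance, and the `more` key and prompt regex are illustrative rather than a fixed API.

```
# Hedged usage sketch for CLI.execute(); mirrors the cmd/prompt/timeout keywords
# used elsewhere in this driver (e.g. onfail()).
output = cli.execute( cmd="ls -l /tmp",   # command sent to the remote shell
                      prompt="\$",         # regex expected after the command
                      timeout=30,          # seconds before pexpect times out
                      more=" " )           # key sent whenever '--More--' appears
```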
+
+    def remove_contol_chars( self, response ):
+        # RE_XML_ILLEGAL = '([\u0000-\u0008\u000b-\u000c\u000e-\u001f\ufffe-\uffff])|([%s-%s][^%s-%s])|([^%s-%s][%s-%s])|([%s-%s]$)|(^[%s-%s])'%( unichr( 0xd800 ),unichr( 0xdbff ),unichr( 0xdc00 ),unichr( 0xdfff ),unichr( 0xd800 ),unichr( 0xdbff ),unichr( 0xdc00 ),unichr( 0xdfff ),unichr( 0xd800 ),unichr( 0xdbff ),unichr( 0xdc00 ),unichr( 0xdfff ) )
+        # response = re.sub( RE_XML_ILLEGAL, "\n", response )
+        response = re.sub( r"[\x01-\x1F\x7F]", "", response )
+        # response = re.sub( r"\[\d+\;1H", "\n", response )
+        response = re.sub( r"\[\d+\;\d+H", "", response )
+        return response
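
remove_contol_chars() strips ASCII control characters and leftover cursor-position sequences from the captured output. A standalone illustration of the same two substitutions on a fabricated buffer:

```
import re

sample = "device list\x1b[2;1Hid=of:0000000000000001\r\x07"
cleaned = re.sub( r"[\x01-\x1F\x7F]", "", sample )   # drop ASCII control chars
cleaned = re.sub( r"\[\d+\;\d+H", "", cleaned )      # drop cursor-move remnants
print( cleaned )   # -> device listid=of:0000000000000001
```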
+
+    def runAsSudoUser( self, handle, pwd, default ):
+
+        i = handle.expect( [ ".ssword:*", default, pexpect.EOF ] )
+        if i == 0:
+            handle.sendline( pwd )
+            handle.sendline( "\n" )
+
+        if i == 1:
+            handle.expect( default )
+
+        if i == 2:
+            main.log.error( "Unable to run as Sudo user" )
+
+        return handle
+
+    def onfail( self ):
+        if 'onfail' in main.componentDictionary[ self.name ]:
+            commandList = main.componentDictionary[
+                self.name ][ 'onfail' ].split( "," )
+            for command in commandList:
+                response = self.execute(
+                    cmd=command,
+                    prompt="(.*)",
+                    timeout=120 )
+
+    def secureCopy( self, userName, ipAddress, filePath, dstPath, pwd="",
+                    direction="from" ):
+        """
+        Definition:
+            Execute scp command in linux to copy to/from a remote host
+        Required:
+            str userName - User name of the remote host
+            str ipAddress - IP address of the remote host
+            str filePath - File path including the file itself
+            str dstPath - Destination path
+        Optional:
+            str pwd - Password of the host
+            str direction - Direction of the scp, defaults to "from" which means
+                            copy "from" the remote machine to local machine,
+                            while "to" means copy "to" the remote machine from
+                            local machine
+        """
+        returnVal = main.TRUE
+        ssh_newkey = 'Are you sure you want to continue connecting'
+        refused = "ssh: connect to host " + \
+                  ipAddress + " port 22: Connection refused"
+
+        if direction == "from":
+            cmd = 'scp ' + str( userName ) + '@' + str( ipAddress ) + ':' + \
+                  str( filePath ) + ' ' + str( dstPath )
+        elif direction == "to":
+            cmd = 'scp ' + str( filePath ) + ' ' + str( userName ) + \
+                  '@' + str( ipAddress ) + ':' + str( dstPath )
+        else:
+            main.log.debug( "Wrong direction using secure copy command!" )
+            return main.FALSE
+
+        main.log.info( "Sending: " + cmd )
+        self.handle.sendline( cmd )
+        i = 0
+        while i < 2:
+            i = self.handle.expect( [
+                                ssh_newkey,
+                                'password:',
+                                "100%",
+                                refused,
+                                "No such file or directory",
+                                pexpect.EOF,
+                                pexpect.TIMEOUT ],
+                                120 )
+            if i == 0:  # ask for ssh key confirmation
+                main.log.info( "ssh key confirmation received, sending yes" )
+                self.handle.sendline( 'yes' )
+            elif i == 1:  # Asked for ssh password
+                main.log.info( "ssh connection asked for password, gave password" )
+                self.handle.sendline( pwd )
+            elif i == 2:  # File finished transferring
+                main.log.info( "Secure copy successful" )
+                returnVal = main.TRUE
+            elif i == 3:  # Connection refused
+                main.log.error(
+                    "ssh: connect to host " +
+                    ipAddress +
+                    " port 22: Connection refused" )
+                returnVal = main.FALSE
+            elif i == 4:  # File Not found
+                main.log.error( "No such file found" )
+                returnVal = main.FALSE
+            elif i == 5:  # EOF
+                main.log.error( "Pexpect.EOF found!!!" )
+                main.cleanup()
+                main.exit()
+            elif i == 6:  # timeout
+                main.log.error(
+                    "No route to the Host " +
+                    userName +
+                    "@" +
+                    ipAddress )
+                returnVal = main.FALSE
+        self.handle.expect( "\$" )
+        return returnVal
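
The direction argument only changes how the scp command string is assembled. A standalone sketch of the two command strings secureCopy() builds; the host, user and paths below are illustrative:

```
userName, ipAddress = "onos", "10.100.198.203"
filePath, dstPath = "/tmp/flows.json", "/home/ubuntu/"

cmd_from = 'scp ' + userName + '@' + ipAddress + ':' + filePath + ' ' + dstPath
cmd_to   = 'scp ' + filePath + ' ' + userName + '@' + ipAddress + ':' + dstPath
print( cmd_from )   # scp onos@10.100.198.203:/tmp/flows.json /home/ubuntu/
print( cmd_to )     # scp /tmp/flows.json onos@10.100.198.203:/home/ubuntu/
```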
+
+    def scp( self, remoteHost, filePath, dstPath, direction="from" ):
+        """
+        Definition:
+            Execute scp command in linux to copy to/from a remote host
+        Required:
+            * remoteHost - TestON component for the remote host
+            str filePath - File path including the file itself
+            str dstPath - Destination path
+        Optional:
+            str direction - Direction of the scp, defaults to "from" which means
+                            copy "from" the remote machine to local machine,
+                            while "to" means copy "to" the remote machine from
+                            local machine
+        """
+        return self.secureCopy( remoteHost.user_name,
+                                remoteHost.ip_address,
+                                filePath,
+                                dstPath,
+                                pwd=remoteHost.pwd,
+                                direction=direction )
diff --git a/src/test/cli/component.py b/src/test/cli/component.py
new file mode 100644
index 0000000..6cf4cd8
--- /dev/null
+++ b/src/test/cli/component.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+"""
+Created on 24-Oct-2012
+
+author:s: Anil Kumar ( anilkumar.s@paxterrasolutions.com ),
+          Raghav Kashyap( raghavkashyap@paxterrasolutions.com )
+
+
+    TestON is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 2 of the License, or
+    ( at your option ) any later version.
+
+    TestON is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+
+
+
+"""
+import logging
+from clicommon import *
+
+class Component( object ):
+
+    """
+    This is the template class for components
+    """
+    def __init__( self ):
+        self.default = ''
+        self.wrapped = sys.modules[ __name__ ]
+        self.count = 0
+
+    def __getattr__( self, name ):
+        """
+         This is invoked if the attribute wasn't found the usual ways.
+         Here it looks for assert_attribute and executes it when an
+         AttributeError occurs.
+         It returns the result of the assert_attribute.
+        """
+        try:
+            return getattr( self.wrapped, name )
+        except AttributeError as error:
+            # NOTE: The first time we load a driver module we get this error
+            if "'module' object has no attribute '__path__'" in error:
+                pass
+            else:
+                main.log.error( str(error.__class__) + " " + str(error) )
+            try:
+                def experimentHandling( *args, **kwargs ):
+                    if main.EXPERIMENTAL_MODE == main.TRUE:
+                        result = self.experimentRun( *args, **kwargs )
+                        main.log.info( "EXPERIMENTAL MODE. API " +
+                                       str( name ) +
+                                       " not yet implemented. " +
+                                       "Returning dummy values" )
+                        return result
+                    else:
+                        return main.FALSE
+                return experimentHandling
+            except TypeError as e:
+                main.log.error( "Arguments for experimental mode do not" +
+                                " have key 'returns': " + str( e ) )
+
+    def connect( self ):
+
+        vars( main )[ self.name + 'log' ] = logging.getLogger( self.name )
+
+        session_file = main.logdir + "/" + self.name + ".session"
+        self.log_handler = logging.FileHandler( session_file )
+        self.log_handler.setLevel( logging.DEBUG )
+
+        vars( main )[ self.name + 'log' ].setLevel( logging.DEBUG )
+        _formatter = logging.Formatter(
+            "%(asctime)s  %(name)-10s: %(levelname)-8s: %(message)s" )
+        self.log_handler.setFormatter( _formatter )
+        vars( main )[ self.name + 'log' ].addHandler( self.log_handler )
+        # Adding header for the component log
+        vars( main )[ self.name + 'log' ].info( main.logHeader )
+        # Opening the session log to append command's execution output
+        self.logfile_handler = open( session_file, "a" )
+
+        return "Dummy"
+
+    def execute( self, cmd ):
+        return main.TRUE
+        # import commands
+        # return commands.getoutput( cmd )
+
+    def disconnect( self ):
+        return main.TRUE
+
+    def config( self ):
+        self = self
+        # Need to update the configuration code
+
+    def cleanup( self ):
+        return main.TRUE
+
+    def log( self, message ):
+        """
+        Routes the log message to the logger of the component
+        on which this method was called.
+        """
+        vars( main )[ self.name + 'log' ].info( "\n" + message + "\n" )
+
+    def close_log_handles( self ):
+        vars( main )[ self.name + 'log' ].removeHandler( self.log_handler )
+        if self.logfile_handler:
+            self.logfile_handler.close()
+
+    def get_version( self ):
+        return "Version unknown"
+
+    def experimentRun( self, *args, **kwargs ):
+        # FIXME handle *args
+        args = utilities.parse_args( [ "RETURNS" ], **kwargs )
+        return args[ "RETURNS" ]
+
+
+if __name__ != "__main__":
+    import sys
+    sys.modules[ __name__ ] = Component()
+
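
The `if __name__ != "__main__"` block above swaps the component module object in sys.modules for a Component instance, so later imports resolve attributes through Component.__getattr__. A minimal standalone sketch of that module-replacement trick, with hypothetical names:

```
import sys

class Shim( object ):
    greeting = "hello from the shim instance"

sys.modules[ "shim_demo" ] = Shim()   # register an instance under a module name

import shim_demo                      # the import machinery returns the instance
print( shim_demo.greeting )           # -> hello from the shim instance
```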
diff --git a/src/test/cli/onosclidriver.py b/src/test/cli/onosclidriver.py
new file mode 100644
index 0000000..b2e0526
--- /dev/null
+++ b/src/test/cli/onosclidriver.py
@@ -0,0 +1,4596 @@
+#!/usr/bin/env python
+
+"""
+This driver enters the onos> prompt to issue commands.
+
+Please follow the coding style demonstrated by existing
+functions and document properly.
+
+If you are a contributor to the driver, please
+list your email here for future contact:
+
+jhall@onlab.us
+andrew@onlab.us
+shreya@onlab.us
+
+OCT 13 2014
+
+"""
+import pexpect
+import re
+import json
+import types
+import time
+import os
+from clidriver import CLI
+from clicommon import *
+
+class OnosCliDriver( CLI ):
+
+    def __init__( self, controller = None, connect = True):
+        """
+        Initialize client
+        """
+        self.name = None
+        self.home = None
+        self.handle = None
+        self.controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
+        super( CLI, self ).__init__()
+        if connect == True:
+            self.connect_cli()
+
+    def connect_cli(self):
+        options = { 'name' : 'onoscli', 'onosIp': '{0}'.format(self.controller) }
+        main.log.info('Connecting to controller at %s' %self.controller)
+        self.connect(name = options['name'], user_name = 'onos', pwd = 'rocks',
+                     ip_address = self.controller, port = '8101', options = options)
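
connect_cli() above logs in to the controller's karaf shell on port 8101 as onos/rocks, with the controller address taken from ONOS_CONTROLLER_IP (defaulting to localhost). A hedged usage sketch; the IP is illustrative:

```
import os
os.environ[ 'ONOS_CONTROLLER_IP' ] = '10.100.198.203'   # otherwise localhost is used

from onosclidriver import OnosCliDriver
cli = OnosCliDriver( connect = True )          # ssh to <controller>:8101 as onos/rocks
devices_json = cli.devices( jsonFormat = True )
cli.disconnect()
```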
+
+    def connect( self, **connectargs ):
+        """
+        Creates ssh handle for ONOS cli.
+        """
+        try:
+            for key in connectargs:
+                vars( self )[ key ] = connectargs[ key ]
+            self.home = "~/onos"
+            for key in self.options:
+                if key == "home":
+                    self.home = self.options[ 'home' ]
+                    break
+            if self.home is None or self.home == "":
+                self.home = "~/onos"
+
+            for key in self.options:
+                if key == 'onosIp':
+                    self.onosIp = self.options[ 'onosIp' ]
+                    break
+
+            self.name = self.options[ 'name' ]
+
+            try:
+                if os.getenv( str( self.ip_address ) ) is not None:
+                    self.ip_address = os.getenv( str( self.ip_address ) )
+                else:
+                    main.log.info( self.name +
+                                   ": Trying to connect to " +
+                                   self.ip_address )
+
+            except KeyError:
+                main.log.info( "Invalid host name," +
+                               " connecting to local host instead" )
+                self.ip_address = 'localhost'
+            except Exception as inst:
+                main.log.error( "Uncaught exception: " + str( inst ) )
+
+            self.handle = super( OnosCliDriver, self ).connect(
+                user_name=self.user_name,
+                ip_address=self.ip_address,
+                port=self.port,
+                pwd=self.pwd,
+                home=self.home )
+
+            #self.handle.sendline( "cd " + self.home )
+            #self.handle.expect( "\$" )
+            if self.handle:
+                return self.handle
+            else:
+                main.log.info( "NO ONOS HANDLE" )
+                return main.FALSE
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":     " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def disconnect( self ):
+        """
+        Called when Test is complete to disconnect the ONOS handle.
+        """
+        response = main.TRUE
+        try:
+            if self.handle:
+                i = self.logout()
+                if i == main.TRUE:
+                    self.handle.sendline( "" )
+                    self.handle.expect( "\$" )
+                    self.handle.sendline( "exit" )
+                    self.handle.expect( "closed" )
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            response = main.FALSE
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":     " + self.handle.before )
+        except ValueError:
+            main.log.exception( "Exception in disconnect of " + self.name )
+            response = main.TRUE
+        except Exception:
+            main.log.exception( self.name + ": Connection failed to the host" )
+            response = main.FALSE
+        return response
+
+    def logout( self ):
+        """
+        Sends 'logout' command to ONOS cli
+        Returns main.TRUE if exited CLI and
+                main.FALSE on timeout (not guaranteed you are disconnected)
+                None on TypeError
+                Exits test on unknown error or pexpect exits unexpectedly
+        """
+        try:
+            if self.handle:
+                self.handle.sendline( "" )
+                i = self.handle.expect( [ "onos>", "\$", pexpect.TIMEOUT ],
+                                        timeout=10 )
+                if i == 0:  # In ONOS CLI
+                    self.handle.sendline( "logout" )
+                    j = self.handle.expect( [ "\$",
+                                              "Command not found:",
+                                              pexpect.TIMEOUT ] )
+                    if j == 0:  # Successfully logged out
+                        return main.TRUE
+                    elif j == 1 or j == 2:
+                        # ONOS didn't fully load, and logout command isn't working
+                        # or the command timed out
+                        self.handle.send( "\x04" )  # send ctrl-d
+                        self.handle.expect( "\$" )
+                        return main.TRUE
+                    else: # some other output
+                        main.log.warn( "Unknown response to logout command: '{}'",
+                                       repr( self.handle.before ) )
+                        return main.FALSE
+                elif i == 1:  # not in CLI
+                    return main.TRUE
+                elif i == 2:  # Timeout
+                    return main.FALSE
+            else:
+                return main.TRUE
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": eof exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except ValueError:
+            main.log.error( self.name +
+                            "ValueError exception in logout method" )
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def setCell( self, cellname ):
+        """
+        Calls 'cell <name>' to set the environment variables on ONOSbench
+
+        Before issuing any cli commands, set the environment variable first.
+        """
+        try:
+            if not cellname:
+                main.log.error( "Must define cellname" )
+                main.cleanup()
+                main.exit()
+            else:
+                self.handle.sendline( "cell " + str( cellname ) )
+                # Expect the cellname in the ONOSCELL variable.
+                # Note that this variable name is subject to change
+                #   and that this driver will have to change accordingly
+                self.handle.expect(str(cellname))
+                handleBefore = self.handle.before
+                handleAfter = self.handle.after
+                # Get the rest of the handle
+                self.handle.sendline("")
+                self.handle.expect("\$")
+                handleMore = self.handle.before
+
+                main.log.info( "Cell call returned: " + handleBefore +
+                               handleAfter + handleMore )
+
+                return main.TRUE
+
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": eof exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def startOnosCli( self, ONOSIp, karafTimeout="",
+                      commandlineTimeout=10, onosStartTimeout=60 ):
+        """
+        karafTimeout is an optional argument. karafTimeout value passed
+        by user would be used to set the current karaf shell idle timeout.
+        Note that whenever this property is modified the shell will exit and
+        the subsequent login will reflect the new idle timeout.
+        Below is an example to start a session with 60 seconds idle timeout
+        ( input value is in milliseconds ):
+
+        tValue = "60000"
+        main.ONOScli1.startOnosCli( ONOSIp, karafTimeout=tValue )
+
+        Note: karafTimeout is left as str so that this could be read
+        and passed to startOnosCli from PARAMS file as str.
+        """
+        self.onosIp = ONOSIp
+        try:
+            self.handle.sendline( "" )
+            x = self.handle.expect( [
+                "\$", "onos>" ], commandlineTimeout)
+
+            if x == 1:
+                main.log.info( "ONOS cli is already running" )
+                return main.TRUE
+
+            # Wait for onos start ( -w ) and enter onos cli
+            self.handle.sendline( "onos -w " + str( ONOSIp ) )
+            i = self.handle.expect( [
+                "onos>",
+                pexpect.TIMEOUT ], onosStartTimeout )
+
+            if i == 0:
+                main.log.info( str( ONOSIp ) + " CLI Started successfully" )
+                if karafTimeout:
+                    self.handle.sendline(
+                        "config:property-set -p org.apache.karaf.shell\
+                                 sshIdleTimeout " +
+                        karafTimeout )
+                    self.handle.expect( "\$" )
+                    self.handle.sendline( "onos -w " + str( ONOSIp ) )
+                    self.handle.expect( "onos>" )
+                return main.TRUE
+            else:
+                # If failed, send ctrl+c to process and try again
+                main.log.info( "Starting CLI failed. Retrying..." )
+                self.handle.send( "\x03" )
+                self.handle.sendline( "onos -w " + str( ONOSIp ) )
+                i = self.handle.expect( [ "onos>", pexpect.TIMEOUT ],
+                                        timeout=30 )
+                if i == 0:
+                    main.log.info( str( ONOSIp ) + " CLI Started " +
+                                   "successfully after retry attempt" )
+                    if karafTimeout:
+                        self.handle.sendline(
+                            "config:property-set -p org.apache.karaf.shell\
+                                    sshIdleTimeout " +
+                            karafTimeout )
+                        self.handle.expect( "\$" )
+                        self.handle.sendline( "onos -w " + str( ONOSIp ) )
+                        self.handle.expect( "onos>" )
+                    return main.TRUE
+                else:
+                    main.log.error( "Connection to CLI " +
+                                    str( ONOSIp ) + " timeout" )
+                    return main.FALSE
+
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def log( self, cmdStr, level="" ):
+        """
+            Logs the given message in the ONOS CLI log.
+            Returns main.TRUE on success
+            Returns main.FALSE if an error occurred
+            Available level: DEBUG, TRACE, INFO, WARN, ERROR
+            Level defaults to INFO
+        """
+        try:
+            lvlStr = ""
+            if level:
+                lvlStr = "--level=" + level
+
+            self.handle.sendline( "" )
+            i = self.handle.expect( [ "onos>", "\$", pexpect.TIMEOUT ] )
+            if i == 1:
+                main.log.error( self.name + ": onos cli session closed. ")
+                if self.onosIp:
+                    main.log.warn( "Trying to reconnect " + self.onosIp )
+                    reconnectResult = self.startOnosCli( self.onosIp )
+                    if reconnectResult:
+                        main.log.info( self.name + ": onos cli session reconnected." )
+                    else:
+                        main.log.error( self.name + ": reconnection failed." )
+                        main.cleanup()
+                        main.exit()
+                else:
+                    main.cleanup()
+                    main.exit()
+            if i == 2:
+                self.handle.sendline( "" )
+                self.handle.expect( "onos>" )
+            self.handle.sendline( "log:log " + lvlStr + " " + cmdStr )
+            self.handle.expect( "log:log" )
+            self.handle.expect( "onos>" )
+
+            response = self.handle.before
+            if re.search( "Error", response ):
+                return main.FALSE
+            return main.TRUE
+        except pexpect.TIMEOUT:
+            main.log.exception( self.name + ": TIMEOUT exception found" )
+            main.cleanup()
+            main.exit()
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def sendline( self, cmdStr, showResponse=False, debug=False, timeout=10 ):
+        """
+        Send a completely user specified string to
+        the onos> prompt. Use this function if you have
+        a very specific command to send.
+
+        Warning: There is no sanity checking of commands
+        sent using this method.
+
+        """
+        try:
+            logStr = "\"Sending CLI command: '" + cmdStr + "'\""
+            self.log( logStr )
+            self.handle.sendline( cmdStr )
+            i = self.handle.expect( ["onos>", "\$"], timeout )
+            response = self.handle.before
+            # TODO: do something with i
+            main.log.info( "Command '" + str( cmdStr ) + "' sent to "
+                           + self.name + "." )
+            if debug:
+                main.log.debug( self.name + ": Raw output" )
+                main.log.debug( self.name + ": " + repr( response ) )
+
+            # Remove ANSI color control strings from output
+            ansiEscape = re.compile( r'\x1b[^m]*m' )
+            response = ansiEscape.sub( '', response )
+            if debug:
+                main.log.debug( self.name + ": ansiEscape output" )
+                main.log.debug( self.name + ": " + repr( response ) )
+
+            # Remove extra return chars that get added
+            response = re.sub(  r"\s\r", "", response )
+            if debug:
+                main.log.debug( self.name + ": Removed extra returns " +
+                                "from output" )
+                main.log.debug( self.name + ": " + repr( response ) )
+
+            # Strip excess whitespace
+            response = response.strip()
+            if debug:
+                main.log.debug( self.name + ": parsed and stripped output" )
+                main.log.debug( self.name + ": " + repr( response ) )
+
+            # parse for just the output, remove the cmd from response
+            output = response.split( cmdStr.strip(), 1 )
+            if debug:
+                main.log.debug( self.name + ": split output" )
+                for r in output:
+                    main.log.debug( self.name + ": " + repr( r ) )
+            output = output[1].strip()
+            if showResponse:
+                main.log.info( "Response from ONOS: {}".format( output ) )
+            return output
+        except pexpect.TIMEOUT:
+            main.log.error( self.name + ":ONOS timeout" )
+            if debug:
+                main.log.debug( self.handle.before )
+            return None
+        except IndexError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
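
sendline() cleans the raw pexpect buffer in stages before returning it: strip ANSI colour codes, drop stray carriage returns, trim whitespace, then split off the echoed command. A standalone walk-through on a fabricated buffer:

```
import re

cmdStr = "devices -j"
raw = "devices -j \r\n\x1b[32m[{\"id\":\"of:0000000000000001\"}]\x1b[0m \r\n"

resp = re.compile( r'\x1b[^m]*m' ).sub( '', raw )       # strip ANSI colour codes
resp = re.sub( r"\s\r", "", resp )                      # drop extra carriage returns
resp = resp.strip()                                     # trim surrounding whitespace
output = resp.split( cmdStr.strip(), 1 )[ 1 ].strip()   # remove the echoed command
print( output )   # -> [{"id":"of:0000000000000001"}]
```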
+
+    # IMPORTANT NOTE:
+    # For all cli commands, naming convention should match
+    # the cli command, changing 'a:b' to 'aB'.
+    # Ex ) onos:topology > onosTopology
+    #    onos:links    > onosLinks
+    #    feature:list  > featureList
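
A hypothetical helper (not part of the driver) that illustrates the naming convention above:

```
def cliNameToMethodName( cliName ):
    scope, _, cmd = cliName.partition( ':' )
    return scope + cmd[ :1 ].upper() + cmd[ 1: ]

print( cliNameToMethodName( 'onos:topology' ) )   # -> onosTopology
print( cliNameToMethodName( 'feature:list' ) )    # -> featureList
```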
+
+    def addNode( self, nodeId, ONOSIp, tcpPort="" ):
+        """
+        Adds a new cluster node by ID and address information.
+        Required:
+            * nodeId
+            * ONOSIp
+        Optional:
+            * tcpPort
+        """
+        try:
+            cmdStr = "add-node " + str( nodeId ) + " " +\
+                str( ONOSIp ) + " " + str( tcpPort )
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            if re.search( "Error", handle ):
+                main.log.error( "Error in adding node" )
+                main.log.error( handle )
+                return main.FALSE
+            else:
+                main.log.info( "Node " + str( ONOSIp ) + " added" )
+                return main.TRUE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def removeNode( self, nodeId ):
+        """
+        Removes a cluster node by ID
+        Issues command: 'remove-node [<node-id>]'
+        Required:
+            * nodeId
+        """
+        try:
+
+            cmdStr = "remove-node " + str( nodeId )
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            if re.search( "Error", handle ):
+                main.log.error( "Error in removing node" )
+                main.log.error( handle )
+                return main.FALSE
+            else:
+                return main.TRUE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def nodes( self, jsonFormat=True):
+        """
+        List the nodes currently visible
+        Issues command: 'nodes'
+        Optional argument:
+            * jsonFormat - boolean indicating if you want output in json
+        """
+        try:
+            cmdStr = "nodes"
+            if jsonFormat:
+                cmdStr += " -j"
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            return output
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def topology( self ):
+        """
+        Definition:
+            Returns the output of topology command.
+        Return:
+            topology = current ONOS topology
+        """
+        try:
+            cmdStr = "topology -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            main.log.info( cmdStr + " returned: " + str( handle ) )
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def deviceRemove( self, deviceId ):
+        """
+        Removes particular device from storage
+
+        TODO: refactor this function
+        """
+        try:
+            cmdStr = "device-remove " + str( deviceId )
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            if re.search( "Error", handle ):
+                main.log.error( "Error in removing device" )
+                main.log.error( handle )
+                return main.FALSE
+            else:
+                return main.TRUE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def devices( self, jsonFormat=True ):
+        """
+        Lists all infrastructure devices or switches
+        Optional argument:
+            * jsonFormat - boolean indicating if you want output in json
+        """
+        try:
+            cmdStr = "devices"
+            if jsonFormat:
+                cmdStr += " -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def balanceMasters( self ):
+        """
+        This balances the devices across all controllers
+        by issuing command: 'onos> onos:balance-masters'
+        If required this could be extended to return devices balanced output.
+        """
+        try:
+            cmdStr = "onos:balance-masters"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            if re.search( "Error", handle ):
+                main.log.error( "Error in balancing masters" )
+                main.log.error( handle )
+                return main.FALSE
+            else:
+                return main.TRUE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def checkMasters( self, jsonFormat=True  ):
+        """
+            Returns the output of the masters command.
+            Optional argument:
+                * jsonFormat - boolean indicating if you want output in json
+        """
+        try:
+            cmdStr = "onos:masters"
+            if jsonFormat:
+                cmdStr += " -j"
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            return output
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def checkBalanceMasters( self, jsonFormat=True ):
+        """
+            Uses the masters command to check that the devices' leadership
+            is evenly divided
+
+            Dependencies: checkMasters() and summary()
+
+            Returns main.TRUE if the devices are balanced
+            Returns main.FALSE if the devices are unbalanced
+            Exits on Exception
+            Returns None on TypeError
+        """
+        try:
+            summaryOutput = self.summary()
+            totalDevices = json.loads( summaryOutput )[ "devices" ]
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, summaryOutput ) )
+            return None
+        try:
+            totalOwnedDevices = 0
+            mastersOutput = self.checkMasters()
+            masters = json.loads( mastersOutput )
+            first = masters[ 0 ][ "size" ]
+            for master in masters:
+                totalOwnedDevices += master[ "size" ]
+                if master[ "size" ] > first + 1 or master[ "size" ] < first - 1:
+                    main.log.error( "Mastership not balanced" )
+                    main.log.info( "\n" + self.checkMasters( False ) )
+                    return main.FALSE
+            main.log.info( "Mastership balanced between " \
+                            + str( len(masters) ) + " masters" )
+            return main.TRUE
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, mastersOutput ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
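
The balance test above simply compares every master's device count against the first master's, tolerating a difference of one. A standalone sketch against a fabricated 'masters -j' style payload:

```
import json

mastersOutput = '[ {"id":"10.0.0.1","size":3}, {"id":"10.0.0.2","size":4}, {"id":"10.0.0.3","size":3} ]'
masters = json.loads( mastersOutput )
first = masters[ 0 ][ "size" ]
balanced = all( first - 1 <= m[ "size" ] <= first + 1 for m in masters )
print( balanced )   # -> True: every size is within one of the first master's size
```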
+
+    def links( self, jsonFormat=True ):
+        """
+        Lists all core links
+        Optional argument:
+            * jsonFormat - boolean indicating if you want output in json
+        """
+        try:
+            cmdStr = "links"
+            if jsonFormat:
+                cmdStr += " -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def ports( self, jsonFormat=True ):
+        """
+        Lists all ports
+        Optional argument:
+            * jsonFormat - boolean indicating if you want output in json
+        """
+        try:
+            cmdStr = "ports"
+            if jsonFormat:
+                cmdStr += " -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def roles( self, jsonFormat=True ):
+        """
+        Lists all devices and the controllers with roles assigned to them
+        Optional argument:
+            * jsonFormat - boolean indicating if you want output in json
+        """
+        try:
+            cmdStr = "roles"
+            if jsonFormat:
+                cmdStr += " -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def getRole( self, deviceId ):
+        """
+        Given a string containing the json representation of the "roles"
+        cli command and a partial or whole device id, returns a json object
+        containing the roles output for the first device whose id contains
+        "device_id"
+
+        Returns:
+        A dict of the role assignments for the given device or
+        None if no match
+        """
+        try:
+            if deviceId is None:
+                return None
+            else:
+                rawRoles = self.roles()
+                rolesJson = json.loads( rawRoles )
+                # search json for the device with id then return the device
+                for device in rolesJson:
+                    # print device
+                    if str( deviceId ) in device[ 'id' ]:
+                        return device
+            return None
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawRoles ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def rolesNotNull( self ):
+        """
+        Iterates through each device and checks if there is a master assigned
+        Returns: main.TRUE if each device has a master
+                 main.FALSE if any device has no master
+        """
+        try:
+            rawRoles = self.roles()
+            rolesJson = json.loads( rawRoles )
+            # search json for the device with id then return the device
+            for device in rolesJson:
+                # print device
+                if device[ 'master' ] == "none":
+                    main.log.warn( "Device has no master: " + str( device ) )
+                    return main.FALSE
+            return main.TRUE
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawRoles ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def paths( self, srcId, dstId ):
+        """
+        Returns string of paths, and the cost.
+        Issues command: onos:paths <src> <dst>
+        """
+        try:
+            cmdStr = "onos:paths " + str( srcId ) + " " + str( dstId )
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            if re.search( "Error", handle ):
+                main.log.error( "Error in getting paths" )
+                return ( handle, "Error" )
+            else:
+                path = handle.split( ";" )[ 0 ]
+                cost = handle.split( ";" )[ 1 ]
+                return ( path, cost )
+        except AssertionError:
+            main.log.exception( "" )
+            return ( handle, "Error" )
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return ( handle, "Error" )
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def hosts( self, jsonFormat=True ):
+        """
+        Lists all discovered hosts
+        Optional argument:
+            * jsonFormat - boolean indicating if you want output in json
+        """
+        try:
+            cmdStr = "hosts"
+            if jsonFormat:
+                cmdStr += " -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            try:
+                # TODO: Maybe make this less hardcoded
+                # ConsistentMap Exceptions
+                assert "org.onosproject.store.service" not in handle
+                # Node not leader
+                assert "java.lang.IllegalStateException" not in handle
+            except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr + "' " +
+                                "command: " + str( handle ) )
+                return None
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def getHost( self, mac ):
+        """
+        Return the first host from the hosts api whose 'id' contains 'mac'
+
+        Note: mac must be a colon separated mac address, but could be a
+              partial mac address
+
+        Return None if there is no match
+        """
+        try:
+            if mac is None:
+                return None
+            else:
+                mac = mac
+                rawHosts = self.hosts()
+                hostsJson = json.loads( rawHosts )
+                # search json for the host with mac then return the device
+                for host in hostsJson:
+                    # print "%s in  %s?" % ( mac, host[ 'id' ] )
+                    if not host:
+                        pass
+                    elif mac in host[ 'id' ]:
+                        return host
+            return None
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawHosts ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
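+    # Hedged usage sketch ( illustrative only ): looking up a host by a partial,
+    # colon separated mac address on a hypothetical driver instance 'cli':
+    #   host = cli.getHost( "00:00:00:00:00:01" )
+    #   if host is not None:
+    #       main.log.info( "Found host with id " + str( host[ 'id' ] ) )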
+
+    def getHostsId( self, hostList ):
+        """
+        Convert a list of Mininet host names into ONOS host ids
+        ( no ONOS command is issued )
+
+        Required:
+            * hostList: List of hosts obtained by Mininet
+        IMPORTANT:
+            This function assumes that you started your
+            topology with the option '--mac'.
+            Furthermore, it assumes that the value of VLAN is '-1'
+        Description:
+            Converts mininet hosts ( h1, h2, h3... ) into
+            ONOS format ( 00:00:00:00:00:01/-1 , ... )
+        """
+        try:
+            onosHostList = []
+
+            for host in hostList:
+                host = host.replace( "h", "" )
+                hostHex = hex( int( host ) ).zfill( 12 )
+                hostHex = str( hostHex ).replace( 'x', '0' )
+                i = iter( str( hostHex ) )
+                hostHex = ":".join( a + b for a, b in zip( i, i ) )
+                hostHex = hostHex + "/-1"
+                onosHostList.append( hostHex )
+
+            return onosHostList
+
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
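+    # Hedged usage sketch ( illustrative only ): converting Mininet host names
+    # to ONOS host ids on a hypothetical driver instance 'cli', assuming a
+    # '--mac' topology and VLAN '-1':
+    #   onosIds = cli.getHostsId( [ 'h1', 'h2' ] )
+    #   # expected: [ '00:00:00:00:00:01/-1', '00:00:00:00:00:02/-1' ]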
+
+    def addHostIntent( self, hostIdOne, hostIdTwo ):
+        """
+        Required:
+            * hostIdOne: ONOS host id for host1
+            * hostIdTwo: ONOS host id for host2
+        Description:
+            Adds a host-to-host intent ( bidirectional ) by
+            specifying the two hosts.
+        Returns:
+            A string of the intent id or None on Error
+        """
+        try:
+            cmdStr = "add-host-intent " + str( hostIdOne ) +\
+                " " + str( hostIdTwo )
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            if re.search( "Error", handle ):
+                main.log.error( "Error in adding Host intent" )
+                main.log.debug( "Response from ONOS was: " + repr( handle ) )
+                return None
+            else:
+                main.log.info( "Host intent installed between " +
+                               str( hostIdOne ) + " and " + str( hostIdTwo ) )
+                match = re.search('id=0x([\da-f]+),', handle)
+                if match:
+                    return match.group()[3:-1]
+                else:
+                    main.log.error( "Error, intent ID not found" )
+                    main.log.debug( "Response from ONOS was: " +
+                                    repr( handle ) )
+                    return None
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
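+    # Hedged usage sketch ( illustrative only ): installing a host-to-host
+    # intent between two ONOS host ids on a hypothetical driver instance 'cli':
+    #   intentId = cli.addHostIntent( '00:00:00:00:00:01/-1',
+    #                                 '00:00:00:00:00:02/-1' )
+    #   if intentId is None:
+    #       main.log.error( "Host intent installation failed" )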
+
+    def addOpticalIntent( self, ingressDevice, egressDevice ):
+        """
+        Required:
+            * ingressDevice: device id of ingress device
+            * egressDevice: device id of egress device
+        Optional:
+            TODO: Still needs to be implemented via dev side
+        Description:
+            Adds an optical intent by specifying an ingress and egress device
+        Returns:
+            A string of the intent id or None on error
+        """
+        try:
+            cmdStr = "add-optical-intent " + str( ingressDevice ) +\
+                " " + str( egressDevice )
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            # If error, return error message
+            if re.search( "Error", handle ):
+                main.log.error( "Error in adding Optical intent" )
+                return None
+            else:
+                main.log.info( "Optical intent installed between " +
+                               str( ingressDevice ) + " and " +
+                               str( egressDevice ) )
+                match = re.search('id=0x([\da-f]+),', handle)
+                if match:
+                    return match.group()[3:-1]
+                else:
+                    main.log.error( "Error, intent ID not found" )
+                    return None
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def addPointIntent(
+            self,
+            ingressDevice,
+            egressDevice,
+            portIngress="",
+            portEgress="",
+            ethType="",
+            ethSrc="",
+            ethDst="",
+            bandwidth="",
+            lambdaAlloc=False,
+            ipProto="",
+            ipSrc="",
+            ipDst="",
+            tcpSrc="",
+            tcpDst="" ):
+        """
+        Required:
+            * ingressDevice: device id of ingress device
+            * egressDevice: device id of egress device
+        Optional:
+            * ethType: specify ethType
+            * ethSrc: specify ethSrc ( i.e. src mac addr )
+            * ethDst: specify ethDst ( i.e. dst mac addr )
+            * bandwidth: specify bandwidth capacity of link
+            * lambdaAlloc: if True, intent will allocate lambda
+              for the specified intent
+            * ipProto: specify ip protocol
+            * ipSrc: specify ip source address
+            * ipDst: specify ip destination address
+            * tcpSrc: specify tcp source port
+            * tcpDst: specify tcp destination port
+        Description:
+            Adds a point-to-point intent ( uni-directional ) by
+            specifying device id's and optional fields
+        Returns:
+            A string of the intent id or None on error
+
+        NOTE: This function may change depending on the
+              options developers provide for point-to-point
+              intent via cli
+        """
+        try:
+            # If there are no optional arguments
+            if not ethType and not ethSrc and not ethDst\
+                    and not bandwidth and not lambdaAlloc \
+                    and not ipProto and not ipSrc and not ipDst \
+                    and not tcpSrc and not tcpDst:
+                cmd = "add-point-intent"
+
+            else:
+                cmd = "add-point-intent"
+
+                if ethType:
+                    cmd += " --ethType " + str( ethType )
+                if ethSrc:
+                    cmd += " --ethSrc " + str( ethSrc )
+                if ethDst:
+                    cmd += " --ethDst " + str( ethDst )
+                if bandwidth:
+                    cmd += " --bandwidth " + str( bandwidth )
+                if lambdaAlloc:
+                    cmd += " --lambda "
+                if ipProto:
+                    cmd += " --ipProto " + str( ipProto )
+                if ipSrc:
+                    cmd += " --ipSrc " + str( ipSrc )
+                if ipDst:
+                    cmd += " --ipDst " + str( ipDst )
+                if tcpSrc:
+                    cmd += " --tcpSrc " + str( tcpSrc )
+                if tcpDst:
+                    cmd += " --tcpDst " + str( tcpDst )
+
+            # Check whether the user appended the port
+            # or provided it as an input
+            if "/" in ingressDevice:
+                cmd += " " + str( ingressDevice )
+            else:
+                if not portIngress:
+                    main.log.error( "You must specify the ingress port" )
+                    # TODO: perhaps more meaningful return
+                    #       Would it make sense to throw an exception and exit
+                    #       the test?
+                    return None
+
+                cmd += " " + \
+                    str( ingressDevice ) + "/" +\
+                    str( portIngress ) + " "
+
+            if "/" in egressDevice:
+                cmd += " " + str( egressDevice )
+            else:
+                if not portEgress:
+                    main.log.error( "You must specify the egress port" )
+                    return None
+
+                cmd += " " +\
+                    str( egressDevice ) + "/" +\
+                    str( portEgress )
+
+            handle = self.sendline( cmd )
+            assert "Command not found:" not in handle, handle
+            # If error, return error message
+            if re.search( "Error", handle ):
+                main.log.error( "Error in adding point-to-point intent" )
+                return None
+            else:
+                # TODO: print out all the options in this message?
+                main.log.info( "Point-to-point intent installed between " +
+                               str( ingressDevice ) + " and " +
+                               str( egressDevice ) )
+                match = re.search('id=0x([\da-f]+),', handle)
+                if match:
+                    return match.group()[3:-1]
+                else:
+                    main.log.error( "Error, intent ID not found" )
+                    return None
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
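+    # Hedged usage sketch ( illustrative only ): a point-to-point intent between
+    # two placeholder device ids, passing the ports separately, on a hypothetical
+    # driver instance 'cli':
+    #   intentId = cli.addPointIntent( 'of:0000000000000001', 'of:0000000000000002',
+    #                                  portIngress='1', portEgress='2' )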
+
+    def addMultipointToSinglepointIntent(
+            self,
+            ingressDeviceList,
+            egressDevice,
+            portIngressList=None,
+            portEgress="",
+            ethType="",
+            ethSrc="",
+            ethDst="",
+            bandwidth="",
+            lambdaAlloc=False,
+            ipProto="",
+            ipSrc="",
+            ipDst="",
+            tcpSrc="",
+            tcpDst="",
+            setEthSrc="",
+            setEthDst="" ):
+        """
+        Note:
+            This function assumes the format of all ingress devices
+            is same. That is, all ingress devices include port numbers
+            with a "/" or all ingress devices could specify device
+            ids and port numbers seperately.
+        Required:
+            * ingressDeviceList: List of device ids of ingress device
+                ( Atleast 2 ingress devices required in the list )
+            * egressDevice: device id of egress device
+        Optional:
+            * ethType: specify ethType
+            * ethSrc: specify ethSrc ( i.e. src mac addr )
+            * ethDst: specify ethDst ( i.e. dst mac addr )
+            * bandwidth: specify bandwidth capacity of link
+            * lambdaAlloc: if True, intent will allocate lambda
+              for the specified intent
+            * ipProto: specify ip protocol
+            * ipSrc: specify ip source address
+            * ipDst: specify ip destination address
+            * tcpSrc: specify tcp source port
+            * tcpDst: specify tcp destination port
+            * setEthSrc: action to Rewrite Source MAC Address
+            * setEthDst: action to Rewrite Destination MAC Address
+        Description:
+            Adds a multipoint-to-singlepoint intent ( uni-directional ) by
+            specifying device id's and optional fields
+        Returns:
+            A string of the intent id or None on error
+
+        NOTE: This function may change depending on the
+              options developers provide for multipoint-to-singlepoint
+              intent via cli
+        """
+        try:
+            # If there are no optional arguments
+            if not ethType and not ethSrc and not ethDst\
+                    and not bandwidth and not lambdaAlloc\
+                    and not ipProto and not ipSrc and not ipDst\
+                    and not tcpSrc and not tcpDst and not setEthSrc\
+                    and not setEthDst:
+                cmd = "add-multi-to-single-intent"
+
+            else:
+                cmd = "add-multi-to-single-intent"
+
+                if ethType:
+                    cmd += " --ethType " + str( ethType )
+                if ethSrc:
+                    cmd += " --ethSrc " + str( ethSrc )
+                if ethDst:
+                    cmd += " --ethDst " + str( ethDst )
+                if bandwidth:
+                    cmd += " --bandwidth " + str( bandwidth )
+                if lambdaAlloc:
+                    cmd += " --lambda "
+                if ipProto:
+                    cmd += " --ipProto " + str( ipProto )
+                if ipSrc:
+                    cmd += " --ipSrc " + str( ipSrc )
+                if ipDst:
+                    cmd += " --ipDst " + str( ipDst )
+                if tcpSrc:
+                    cmd += " --tcpSrc " + str( tcpSrc )
+                if tcpDst:
+                    cmd += " --tcpDst " + str( tcpDst )
+                if setEthSrc:
+                    cmd += " --setEthSrc " + str( setEthSrc )
+                if setEthDst:
+                    cmd += " --setEthDst " + str( setEthDst )
+
+            # Check whether the user appended the port
+            # or provided it as an input
+
+            if portIngressList is None:
+                for ingressDevice in ingressDeviceList:
+                    if "/" in ingressDevice:
+                        cmd += " " + str( ingressDevice )
+                    else:
+                        main.log.error( "You must specify " +
+                                        "the ingress port" )
+                        # TODO: perhaps more meaningful return
+                        return main.FALSE
+            else:
+                if len( ingressDeviceList ) == len( portIngressList ):
+                    for ingressDevice, portIngress in zip( ingressDeviceList,
+                                                           portIngressList ):
+                        cmd += " " + \
+                            str( ingressDevice ) + "/" +\
+                            str( portIngress ) + " "
+                else:
+                    main.log.error( "Device list and port list do not " +
+                                    "have the same length" )
+                    return main.FALSE
+            if "/" in egressDevice:
+                cmd += " " + str( egressDevice )
+            else:
+                if not portEgress:
+                    main.log.error( "You must specify " +
+                                    "the egress port" )
+                    return main.FALSE
+
+                cmd += " " +\
+                    str( egressDevice ) + "/" +\
+                    str( portEgress )
+            handle = self.sendline( cmd )
+            assert "Command not found:" not in handle, handle
+            # If error, return error message
+            if re.search( "Error", handle ):
+                main.log.error( "Error in adding multipoint-to-singlepoint " +
+                                "intent" )
+                return None
+            else:
+                match = re.search('id=0x([\da-f]+),', handle)
+                if match:
+                    return match.group()[3:-1]
+                else:
+                    main.log.error( "Error, intent ID not found" )
+                    return None
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
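+    # Hedged usage sketch ( illustrative only ): a multipoint-to-singlepoint
+    # intent with two placeholder ingress device/port pairs ( port appended
+    # with "/" ) on a hypothetical driver instance 'cli':
+    #   intentId = cli.addMultipointToSinglepointIntent(
+    #       [ 'of:0000000000000001/1', 'of:0000000000000002/1' ],
+    #       'of:0000000000000003/1' )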
+
+    def addSinglepointToMultipointIntent(
+            self,
+            ingressDevice,
+            egressDeviceList,
+            portIngress="",
+            portEgressList=None,
+            ethType="",
+            ethSrc="",
+            ethDst="",
+            bandwidth="",
+            lambdaAlloc=False,
+            ipProto="",
+            ipSrc="",
+            ipDst="",
+            tcpSrc="",
+            tcpDst="",
+            setEthSrc="",
+            setEthDst="" ):
+        """
+        Note:
+            This function assumes the format of all egress devices
+            is the same. That is, either all egress devices include port
+            numbers with a "/", or all egress devices specify device
+            ids and port numbers separately.
+        Required:
+            * egressDeviceList: List of device ids of the egress devices
+                ( At least 2 egress devices are required in the list )
+            * ingressDevice: device id of ingress device
+        Optional:
+            * ethType: specify ethType
+            * ethSrc: specify ethSrc ( i.e. src mac addr )
+            * ethDst: specify ethDst ( i.e. dst mac addr )
+            * bandwidth: specify bandwidth capacity of link
+            * lambdaAlloc: if True, intent will allocate lambda
+              for the specified intent
+            * ipProto: specify ip protocol
+            * ipSrc: specify ip source address
+            * ipDst: specify ip destination address
+            * tcpSrc: specify tcp source port
+            * tcpDst: specify tcp destination port
+            * setEthSrc: action to Rewrite Source MAC Address
+            * setEthDst: action to Rewrite Destination MAC Address
+        Description:
+            Adds a singlepoint-to-multipoint intent ( uni-directional ) by
+            specifying device id's and optional fields
+        Returns:
+            A string of the intent id or None on error
+
+        NOTE: This function may change depending on the
+              options developers provide for singlepoint-to-multipoint
+              intent via cli
+        """
+        try:
+            # If there are no optional arguments
+            if not ethType and not ethSrc and not ethDst\
+                    and not bandwidth and not lambdaAlloc\
+                    and not ipProto and not ipSrc and not ipDst\
+                    and not tcpSrc and not tcpDst and not setEthSrc\
+                    and not setEthDst:
+                cmd = "add-single-to-multi-intent"
+
+            else:
+                cmd = "add-single-to-multi-intent"
+
+                if ethType:
+                    cmd += " --ethType " + str( ethType )
+                if ethSrc:
+                    cmd += " --ethSrc " + str( ethSrc )
+                if ethDst:
+                    cmd += " --ethDst " + str( ethDst )
+                if bandwidth:
+                    cmd += " --bandwidth " + str( bandwidth )
+                if lambdaAlloc:
+                    cmd += " --lambda "
+                if ipProto:
+                    cmd += " --ipProto " + str( ipProto )
+                if ipSrc:
+                    cmd += " --ipSrc " + str( ipSrc )
+                if ipDst:
+                    cmd += " --ipDst " + str( ipDst )
+                if tcpSrc:
+                    cmd += " --tcpSrc " + str( tcpSrc )
+                if tcpDst:
+                    cmd += " --tcpDst " + str( tcpDst )
+                if setEthSrc:
+                    cmd += " --setEthSrc " + str( setEthSrc )
+                if setEthDst:
+                    cmd += " --setEthDst " + str( setEthDst )
+
+            # Check whether the user appended the port
+            # or provided it as an input
+
+            if "/" in ingressDevice:
+                cmd += " " + str( ingressDevice )
+            else:
+                if not portIngress:
+                    main.log.error( "You must specify " +
+                                    "the Ingress port" )
+                    return main.FALSE
+
+                cmd += " " +\
+                    str( ingressDevice ) + "/" +\
+                    str( portIngress )
+
+            if portEgressList is None:
+                for egressDevice in egressDeviceList:
+                    if "/" in egressDevice:
+                        cmd += " " + str( egressDevice )
+                    else:
+                        main.log.error( "You must specify " +
+                                        "the egress port" )
+                        # TODO: perhaps more meaningful return
+                        return main.FALSE
+            else:
+                if len( egressDeviceList ) == len( portEgressList ):
+                    for egressDevice, portEgress in zip( egressDeviceList,
+                                                         portEgressList ):
+                        cmd += " " + \
+                            str( egressDevice ) + "/" +\
+                            str( portEgress )
+                else:
+                    main.log.error( "Device list and port list do not " +
+                                    "have the same length" )
+                    return main.FALSE
+            handle = self.sendline( cmd )
+            assert "Command not found:" not in handle, handle
+            # If error, return error message
+            if re.search( "Error", handle ):
+                main.log.error( "Error in adding singlepoint-to-multipoint " +
+                                "intent" )
+                return None
+            else:
+                match = re.search('id=0x([\da-f]+),', handle)
+                if match:
+                    return match.group()[3:-1]
+                else:
+                    main.log.error( "Error, intent ID not found" )
+                    return None
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def addMplsIntent(
+            self,
+            ingressDevice,
+            egressDevice,
+            ingressPort="",
+            egressPort="",
+            ethType="",
+            ethSrc="",
+            ethDst="",
+            bandwidth="",
+            lambdaAlloc=False,
+            ipProto="",
+            ipSrc="",
+            ipDst="",
+            tcpSrc="",
+            tcpDst="",
+            ingressLabel="",
+            egressLabel="",
+            priority=""):
+        """
+        Required:
+            * ingressDevice: device id of ingress device
+            * egressDevice: device id of egress device
+        Optional:
+            * ethType: specify ethType
+            * ethSrc: specify ethSrc ( i.e. src mac addr )
+            * ethDst: specify ethDst ( i.e. dst mac addr )
+            * bandwidth: specify bandwidth capacity of link
+            * lambdaAlloc: if True, intent will allocate lambda
+              for the specified intent
+            * ipProto: specify ip protocol
+            * ipSrc: specify ip source address
+            * ipDst: specify ip destination address
+            * tcpSrc: specify tcp source port
+            * tcpDst: specify tcp destination port
+            * ingressLabel: Ingress MPLS label
+            * egressLabel: Egress MPLS label
+        Description:
+            Adds MPLS intent by
+            specifying device id's and optional fields
+        Returns:
+            A string of the intent id or None on error
+
+        NOTE: This function may change depending on the
+              options developers provide for MPLS
+              intent via cli
+        """
+        try:
+            # If there are no optional arguments
+            if not ethType and not ethSrc and not ethDst\
+                    and not bandwidth and not lambdaAlloc \
+                    and not ipProto and not ipSrc and not ipDst \
+                    and not tcpSrc and not tcpDst and not ingressLabel \
+                    and not egressLabel:
+                cmd = "add-mpls-intent"
+
+            else:
+                cmd = "add-mpls-intent"
+
+                if ethType:
+                    cmd += " --ethType " + str( ethType )
+                if ethSrc:
+                    cmd += " --ethSrc " + str( ethSrc )
+                if ethDst:
+                    cmd += " --ethDst " + str( ethDst )
+                if bandwidth:
+                    cmd += " --bandwidth " + str( bandwidth )
+                if lambdaAlloc:
+                    cmd += " --lambda "
+                if ipProto:
+                    cmd += " --ipProto " + str( ipProto )
+                if ipSrc:
+                    cmd += " --ipSrc " + str( ipSrc )
+                if ipDst:
+                    cmd += " --ipDst " + str( ipDst )
+                if tcpSrc:
+                    cmd += " --tcpSrc " + str( tcpSrc )
+                if tcpDst:
+                    cmd += " --tcpDst " + str( tcpDst )
+                if ingressLabel:
+                    cmd += " --ingressLabel " + str( ingressLabel )
+                if egressLabel:
+                    cmd += " --egressLabel " + str( egressLabel )
+                if priority:
+                    cmd += " --priority " + str( priority )
+
+            # Check whether the user appended the port
+            # or provided it as an input
+            if "/" in ingressDevice:
+                cmd += " " + str( ingressDevice )
+            else:
+                if not ingressPort:
+                    main.log.error( "You must specify the ingress port" )
+                    return None
+
+                cmd += " " + \
+                    str( ingressDevice ) + "/" +\
+                    str( ingressPort ) + " "
+
+            if "/" in egressDevice:
+                cmd += " " + str( egressDevice )
+            else:
+                if not egressPort:
+                    main.log.error( "You must specify the egress port" )
+                    return None
+
+                cmd += " " +\
+                    str( egressDevice ) + "/" +\
+                    str( egressPort )
+
+            handle = self.sendline( cmd )
+            assert "Command not found:" not in handle, handle
+            # If error, return error message
+            if re.search( "Error", handle ):
+                main.log.error( "Error in adding mpls intent" )
+                return None
+            else:
+                # TODO: print out all the options in this message?
+                main.log.info( "MPLS intent installed between " +
+                               str( ingressDevice ) + " and " +
+                               str( egressDevice ) )
+                match = re.search('id=0x([\da-f]+),', handle)
+                if match:
+                    return match.group()[3:-1]
+                else:
+                    main.log.error( "Error, intent ID not found" )
+                    return None
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
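+    # Hedged usage sketch ( illustrative only ): an MPLS intent between two
+    # placeholder devices with example label values, on a hypothetical driver
+    # instance 'cli':
+    #   intentId = cli.addMplsIntent( 'of:0000000000000001', 'of:0000000000000002',
+    #                                 ingressPort='1', egressPort='2',
+    #                                 ingressLabel='100', egressLabel='200' )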
+
+    def removeIntent( self, intentId, app='org.onosproject.cli',
+                      purge=False, sync=False ):
+        """
+        Remove intent for specified application id and intent id
+        Optional args:-
+        -s or --sync: Waits for the removal before returning
+        -p or --purge: Purge the intent from the store after removal
+
+        Returns:
+            main.FALSE on error and
+            the cli output otherwise
+        """
+        try:
+            cmdStr = "remove-intent"
+            if purge:
+                cmdStr += " -p"
+            if sync:
+                cmdStr += " -s"
+
+            cmdStr += " " + app + " " + str( intentId )
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            if re.search( "Error", handle ):
+                main.log.error( "Error in removing intent" )
+                return main.FALSE
+            else:
+                # TODO: Should this be main.TRUE
+                return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
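+    # Hedged usage sketch ( illustrative only ): removing and purging an intent
+    # id previously returned by one of the add*Intent helpers, on a hypothetical
+    # driver instance 'cli':
+    #   result = cli.removeIntent( intentId, app='org.onosproject.cli',
+    #                              purge=True, sync=True )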
+
+    def removeAllIntents( self, purge=False, sync=False, app='org.onosproject.cli' ):
+        """
+        Description:
+            Remove all the intents
+        Optional args:-
+            -s or --sync: Waits for the removal before returning
+            -p or --purge: Purge the intent from the store after removal
+        Returns:
+            Returns main.TRUE if all intents are removed, otherwise returns
+            main.FALSE; Returns None for exception
+        """
+        try:
+            cmdStr = "remove-intent"
+            if purge:
+                cmdStr += " -p"
+            if sync:
+                cmdStr += " -s"
+
+            cmdStr += " " + app
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            if re.search( "Error", handle ):
+                main.log.error( "Error in removing intent" )
+                return main.FALSE
+            else:
+                return main.TRUE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def purgeWithdrawnIntents( self ):
+        """
+        Purges all WITHDRAWN Intents
+        """
+        try:
+            cmdStr = "purge-intents"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            if re.search( "Error", handle ):
+                main.log.error( "Error in purging intents" )
+                return main.FALSE
+            else:
+                return main.TRUE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def routes( self, jsonFormat=False ):
+        """
+        NOTE: This method should be used after installing application:
+              onos-app-sdnip
+        Optional:
+            * jsonFormat: enable output formatting in json
+        Description:
+            Obtain all routes in the system
+        """
+        try:
+            cmdStr = "routes"
+            if jsonFormat:
+                cmdStr += " -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def ipv4RouteNumber( self ):
+        """
+        NOTE: This method should be used after installing application:
+              onos-app-sdnip
+        Description:
+            Obtain the total IPv4 routes number in the system
+        """
+        try:
+            cmdStr = "routes -s -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            jsonResult = json.loads( handle )
+            return jsonResult['totalRoutes4']
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, handle ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def intents( self, jsonFormat = True, summary = False, **intentargs):
+        """
+        Description:
+            Obtain intents from the ONOS cli.
+        Optional:
+            * jsonFormat: Enable output formatting in json, defaults to True
+            * summary: Whether to output only the intent summary, defaults to False
+            * type: Only output a certain type of intent. This option is valid
+                    only when jsonFormat is True and summary is True.
+        """
+        try:
+            cmdStr = "intents"
+            if summary:
+                cmdStr += " -s"
+            if jsonFormat:
+                cmdStr += " -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            args = utilities.parse_args( [ "TYPE" ], **intentargs )
+            if "TYPE" in args.keys():
+                intentType = args[ "TYPE" ]
+            else:
+                intentType = ""
+            # If we want the summary of a specific intent type
+            if jsonFormat and summary and ( intentType != "" ):
+                jsonResult = json.loads( handle )
+                if intentType in jsonResult.keys():
+                    return jsonResult[ intentType ]
+                else:
+                    main.log.error( "unknown TYPE, returning all types of intents" )
+                    return handle
+            else:
+                return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, handle ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
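+    # Hedged usage sketch ( illustrative only ): fetching the json intent summary
+    # for a single type on a hypothetical driver instance 'cli'; 'all' is the
+    # aggregate key also used by checkIntentSummary():
+    #   allSummary = cli.intents( jsonFormat=True, summary=True, TYPE='all' )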
+
+    def getIntentState(self, intentsId, intentsJson=None):
+        """
+            Check intent state.
+            Accepts a single intent ID (string type) or a list of intent IDs.
+            Returns the state (string type) of the intent if a single intent
+            ID is given.
+            Returns a list of dictionaries, one per intent ID, each holding
+            the intent ID and its corresponding state.
+            Parameters:
+            intentsId: a single intent ID (string) or a list of intent IDs
+            intentsJson: parsed json object from the onos:intents api
+            Returns:
+            state = An intent's state - INSTALLED, WITHDRAWN, etc.
+            dictList = List of dictionaries, each containing the 'id' and
+            'state' of one intent.
+        """
+        try:
+            state = "State is Undefined"
+            if not intentsJson:
+                rawJson = self.intents()
+            else:
+                rawJson = intentsJson
+            parsedIntentsJson = json.loads( rawJson )
+            if isinstance( intentsId, types.StringType ):
+                for intent in parsedIntentsJson:
+                    if intentsId == intent[ 'id' ]:
+                        state = intent[ 'state' ]
+                        return state
+                main.log.info( "Cannot find intent ID" + str( intentsId ) +
+                               " on the list" )
+                return state
+            elif isinstance( intentsId, types.ListType ):
+                dictList = []
+                for i in xrange( len( intentsId ) ):
+                    stateDict = {}
+                    for intents in parsedIntentsJson:
+                        if intentsId[ i ] == intents[ 'id' ]:
+                            stateDict[ 'state' ] = intents[ 'state' ]
+                            stateDict[ 'id' ] = intentsId[ i ]
+                            dictList.append( stateDict )
+                            break
+                if len( intentsId ) != len( dictList ):
+                    main.log.info( "Cannot find some of the intent ID state" )
+                return dictList
+            else:
+                main.log.info( "Invalid intents ID entry" )
+                return None
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawJson ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def checkIntentState( self, intentsId, expectedState='INSTALLED' ):
+        """
+        Description:
+            Check intents state
+        Required:
+            intentsId - List of intents ID to be checked
+        Optional:
+            expectedState - Check the expected state(s) of each intents
+                            state in the list.
+                            *NOTE: You can pass in a list of expected state,
+                            Eg: expectedState = [ 'INSTALLED' , 'INSTALLING' ]
+        Return:
+            Returns main.TRUE only if all intents are in the expected
+            state(s); otherwise, returns main.FALSE.
+        """
+        try:
+            # Generating a dictionary: intent id as a key and state as value
+            returnValue = main.TRUE
+            intentsDict = self.getIntentState( intentsId )
+            if len( intentsId ) != len( intentsDict ):
+                main.log.info( self.name + ": Something went wrong while " +
+                               "getting the intents state" )
+                return main.FALSE
+
+            if isinstance( expectedState, types.StringType ):
+                for intents in intentsDict:
+                    if intents.get( 'state' ) != expectedState:
+                        main.log.debug( self.name + " : Intent ID - " +
+                                        intents.get( 'id' ) +
+                                        " actual state = " +
+                                        intents.get( 'state' )
+                                        + " does not equal expected state = "
+                                        + expectedState )
+                        returnValue = main.FALSE
+
+            elif isinstance( expectedState, types.ListType ):
+                for intents in intentsDict:
+                    if not any( state == intents.get( 'state' ) for state in
+                                expectedState ):
+                        main.log.debug( self.name + " : Intent ID - " +
+                                        intents.get( 'id' ) +
+                                        " actual state = " +
+                                        intents.get( 'state' ) +
+                                        " does not equal expected states = "
+                                        + str( expectedState ) )
+                        returnValue = main.FALSE
+
+            if returnValue == main.TRUE:
+                main.log.info( self.name + ": All " +
+                               str( len( intentsDict ) ) +
+                               " intents are in " + str( expectedState ) +
+                               " state" )
+            return returnValue
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
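+    # Hedged usage sketch ( illustrative only ): verifying that every intent id
+    # reached the INSTALLED state, on a hypothetical driver instance 'cli':
+    #   intentIds = cli.getAllIntentsId()
+    #   if cli.checkIntentState( intentIds, expectedState='INSTALLED' ) == main.TRUE:
+    #       main.log.info( "All intents installed" )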
+
+    def checkIntentSummary( self, timeout=60 ):
+        """
+        Description:
+            Check the number of installed intents.
+        Optional:
+            timeout - the timeout for pexpect
+        Return:
+            Returns main.TRUE only if the number of installed intents equals
+            the total number of intents; otherwise, returns main.FALSE.
+        """
+
+        try:
+            cmd = "intents -s -j"
+
+            # Check response if something wrong
+            response = self.sendline( cmd, timeout=timeout )
+            if response is None:
+                return main.FALSE
+            response = json.loads( response )
+
+            # get total and installed number, see if they are match
+            allState = response.get( 'all' )
+            if allState.get('total') == allState.get('installed'):
+                main.log.info( 'Total Intents: {}   Installed Intents: {}'.format( allState.get('total'), allState.get('installed') ) )
+                return main.TRUE
+            main.log.info( 'Intent verification failed. Total intents: {}   Installed intents: {}'.format( allState.get('total'), allState.get('installed') ) )
+            return main.FALSE
+
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, response ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
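+    # Hedged usage sketch ( illustrative only ): checking the intent summary on a
+    # hypothetical driver instance 'cli':
+    #   if cli.checkIntentSummary( timeout=60 ) != main.TRUE:
+    #       main.log.error( "Not all intents were installed" )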
+
+    def flows( self, state="", jsonFormat=True, timeout=60 ):
+        """
+        Optional:
+            * jsonFormat: enable output formatting in json
+        Description:
+            Obtain flows currently installed
+        """
+        try:
+            cmdStr = "flows"
+            if jsonFormat:
+                cmdStr += " -j "
+            cmdStr += state
+            handle = self.sendline( cmdStr, timeout=timeout )
+            assert "Command not found:" not in handle, handle
+            if re.search( "Error:", handle ):
+                main.log.error( self.name + ": flows() response: " +
+                                str( handle ) )
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.TIMEOUT:
+            main.log.error( self.name + ": ONOS timeout" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+
+    def checkFlowsState( self, isPENDING=True, timeout=60 ):
+        """
+        Description:
+            Check if all the current flows are in the ADDED state.
+            We check the PENDING_ADD, PENDING_REMOVE, REMOVED, and FAILED flows;
+            if the count of those states is 0, all current flows are in the
+            ADDED state and we return main.TRUE, otherwise main.FALSE.
+        Optional:
+            * isPENDING:  whether the PENDING_ADD is also a correct status
+        Return:
+            returnValue - Returns main.TRUE only if all flows are in
+                          ADDED state or PENDING_ADD if the isPENDING
+                          parameter is set true, return main.FALSE otherwise.
+        """
+        try:
+            states = ["PENDING_ADD", "PENDING_REMOVE", "REMOVED", "FAILED"]
+            checkedStates = []
+            statesCount = [0, 0, 0, 0]
+            for s in states:
+                rawFlows = self.flows( state=s, timeout = timeout )
+                checkedStates.append( json.loads( rawFlows ) )
+            for i in range( len( states ) ):
+                for c in checkedStates[i]:
+                    try:
+                        statesCount[i] += int( c.get( "flowCount" ) )
+                    except TypeError:
+                        main.log.exception( "Json object not as expected" )
+                main.log.info( states[i] + " flows: " + str( statesCount[i] ) )
+
+            # We want to count PENDING_ADD if isPENDING is true
+            if isPENDING:
+                if statesCount[1] + statesCount[2] + statesCount[3] > 0:
+                    return main.FALSE
+            else:
+                if statesCount[0] + statesCount[1] + statesCount[2] + statesCount[3] > 0:
+                    return main.FALSE
+            return main.TRUE
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawFlows ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
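+    # Hedged usage sketch ( illustrative only ): checking that no flows are left
+    # in a pending or failed state, on a hypothetical driver instance 'cli':
+    #   if cli.checkFlowsState( isPENDING=True ) == main.TRUE:
+    #       main.log.info( "All flows are ADDED ( or PENDING_ADD )" )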
+
+    def pushTestIntents( self, ingress, egress, batchSize, offset="",
+                         options="", timeout=10, background = False ):
+        """
+        Description:
+            Push a number of intents in a batch format to
+            a specific point-to-point intent definition
+        Required:
+            * ingress: specify source dpid
+            * egress: specify destination dpid
+            * batchSize: specify number of intents to push
+        Optional:
+            * offset: the keyOffset is where the next batch of intents
+                      will be installed
+        Returns: None if pushing the test intents failed,
+                 main.TRUE if successful.
+                 A timeout exception will return None,
+                 a TypeError will return None,
+                 other exceptions will exit()
+        """
+        try:
+            if background:
+                back = "&"
+            else:
+                back = ""
+            cmd = "push-test-intents {} {} {} {} {} {}".format( options,
+                                                                ingress,
+                                                                egress,
+                                                                batchSize,
+                                                                offset,
+                                                                back )
+            response = self.sendline( cmd, timeout=timeout )
+            assert "Command not found:" not in response, response
+            main.log.info( response )
+            if response is None:
+                return None
+
+            # TODO: We should handle if there is failure in installation
+            return main.TRUE
+
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except pexpect.TIMEOUT:
+            main.log.error( self.name + ": ONOS timeout" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
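+    # Hedged usage sketch ( illustrative only ): pushing a batch of 100 test
+    # intents between two placeholder connect points, in the background, on a
+    # hypothetical driver instance 'cli':
+    #   result = cli.pushTestIntents( 'of:0000000000000001/1',
+    #                                 'of:0000000000000002/1',
+    #                                 100, background=True )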
+
+    def getTotalFlowsNum( self, timeout=60 ):
+        """
+        Description:
+            Get the number of ADDED flows.
+        Return:
+            The number of ADDED flows
+        """
+
+        try:
+            # get total added flows number
+            cmd = "flows -s|grep ADDED|wc -l"
+            totalFlows = self.sendline( cmd, timeout=timeout )
+
+            if totalFlows is None:
+                # if timeout, we will get total number of all flows, and subtract other states
+                states = ["PENDING_ADD", "PENDING_REMOVE", "REMOVED", "FAILED"]
+                checkedStates = []
+                totalFlows = 0
+                statesCount = [0, 0, 0, 0]
+
+                # get total flows from summary
+                response = json.loads( self.sendline( "summary -j", timeout=timeout ) )
+                totalFlows = int( response.get("flows") )
+
+                for s in states:
+                    rawFlows = self.flows( state=s, timeout = timeout )
+                    if rawFlows == None:
+                        # if timeout, return the total flows number from summary command
+                        return totalFlows
+                    checkedStates.append( json.loads( rawFlows ) )
+
+                # Calculate ADDED flows number, equal total subtracts others
+                for i in range( len( states ) ):
+                    for c in checkedStates[i]:
+                        try:
+                            statesCount[i] += int( c.get( "flowCount" ) )
+                        except TypeError:
+                            main.log.exception( "Json object not as expected" )
+                    totalFlows = totalFlows - int( statesCount[i] )
+                    main.log.info( states[i] + " flows: " + str( statesCount[i] ) )
+
+                return totalFlows
+
+            return totalFlows
+
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
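+    # Hedged usage sketch ( illustrative only ): comparing the ADDED flow count
+    # before and after installing intents, on a hypothetical driver instance 'cli':
+    #   before = cli.getTotalFlowsNum( timeout=60 )
+    #   # ... install intents here ...
+    #   after = cli.getTotalFlowsNum( timeout=60 )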
+
+    def getTotalIntentsNum( self ):
+        """
+        Description:
+            Get the total number of intents, include every states.
+        Return:
+            The number of intents
+        """
+        try:
+            cmd = "summary -j"
+            response = self.sendline( cmd )
+            if response is None:
+                return -1
+            response = json.loads( response )
+            return int( response.get("intents") )
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def intentsEventsMetrics( self, jsonFormat=True ):
+        """
+        Description: Returns intents events metrics
+        Optional:
+            * jsonFormat: enable json formatting of output
+        """
+        try:
+            cmdStr = "intents-events-metrics"
+            if jsonFormat:
+                cmdStr += " -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def topologyEventsMetrics( self, jsonFormat=True ):
+        """
+        Description: Returns topology metrics
+        Optional:
+            * jsonFormat: enable json formatting of output
+        """
+        try:
+            cmdStr = "topology-events-metrics"
+            if jsonFormat:
+                cmdStr += " -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            if handle:
+                return handle
+            elif jsonFormat:
+                # Return empty json
+                return '{}'
+            else:
+                return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    # Wrapper functions ****************
+    # Wrapper functions use existing driver
+    # functions and extends their use case.
+    # For example, we may use the output of
+    # a normal driver function, and parse it
+    # using a wrapper function
+
+    def getAllIntentsId( self ):
+        """
+        Description:
+            Obtain all intent id's in a list
+        """
+        try:
+            # Obtain output of intents function
+            intentsStr = self.intents(jsonFormat=False)
+            intentIdList = []
+
+            # Parse the intents output for ID's
+            intentsList = [ s.strip() for s in intentsStr.splitlines() ]
+            for intents in intentsList:
+                match = re.search('id=0x([\da-f]+),', intents)
+                if match:
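+                    # match.group() is "id=0x<hex>,"; slice off "id=" and the trailing comma to keep the hex id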
+                    tmpId = match.group()[3:-1]
+                    intentIdList.append( tmpId )
+            return intentIdList
+
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def FlowAddedCount( self, deviceId ):
+        """
+        Determine the number of flow rules for the given device id that are
+        in the added state
+        """
+        try:
+            cmdStr = "flows any " + str( deviceId ) + " | " +\
+                     "grep 'state=ADDED' | wc -l"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def getAllDevicesId( self ):
+        """
+        Use the 'devices' function to obtain a list of all devices
+        and parse the result to obtain a list of all device
+        id's. Returns this list, or an empty list if no
+        devices exist. The list is ordered sequentially.
+
+        This function may be useful if you are not sure of the
+        device id, and wish to execute other commands using
+        the ids. By obtaining the list of device ids on the fly,
+        you can iterate through the list to get mastership, etc.
+        """
+        try:
+            # Call devices and store result string
+            devicesStr = self.devices( jsonFormat=False )
+            idList = []
+
+            if not devicesStr:
+                main.log.info( "There are no devices to get id from" )
+                return idList
+
+            # Split the string into list by comma
+            deviceList = devicesStr.split( "," )
+            # Get temporary list of all arguments with string 'id='
+            tempList = [ dev for dev in deviceList if "id=" in dev ]
+            # Split list further into arguments before and after string
+            # 'id='. Get the latter portion ( the actual device id ) and
+            # append to idList
+            for arg in tempList:
+                idList.append( arg.split( "id=" )[ 1 ] )
+            return idList
+
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def getAllNodesId( self ):
+        """
+        Uses the 'nodes' function to obtain a list of all nodes
+        and parses the result to obtain just the
+        node id's.
+        Returns:
+            list of node id's
+        """
+        try:
+            nodesStr = self.nodes( jsonFormat=True )
+            idList = []
+            # Sample nodesStr output
+            # id=local, address=127.0.0.1:9876, state=READY *
+            if not nodesStr:
+                main.log.info( "There are no nodes to get id from" )
+                return idList
+            nodesJson = json.loads( nodesStr )
+            idList = [ node.get('id') for node in nodesJson ]
+            return idList
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, nodesStr ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def getDevice( self, dpid=None ):
+        """
+        Return the first device from the devices api whose 'id' contains 'dpid'
+        Return None if there is no match
+        """
+        try:
+            if dpid is None:
+                return None
+            else:
+                dpid = dpid.replace( ':', '' )
+                rawDevices = self.devices()
+                devicesJson = json.loads( rawDevices )
+                # search json for the device with dpid then return the device
+                for device in devicesJson:
+                    # print "%s in  %s?" % ( dpid, device[ 'id' ] )
+                    if dpid in device[ 'id' ]:
+                        return device
+            return None
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawDevices ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def checkStatus( self, ip, numoswitch, numolink, logLevel="info" ):
+        """
+        Checks the number of switches & links that ONOS sees against the
+        supplied values. By default this will report to main.log, but the
+        log level can be specified.
+
+        Params: ip = ip used for the onos cli
+                numoswitch = expected number of switches
+                numolink = expected number of links
+                logLevel = level to log to. Currently accepts
+                'info', 'warn' and 'report'
+
+        Returns: main.TRUE if the number of switches and links are correct,
+                 main.FALSE if the number of switches and links is incorrect,
+                 and main.ERROR otherwise
+        """
+        try:
+            topology = self.getTopology( ip )
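+            # An empty dict from getTopology() means the topology could not be retrieved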
+            if topology == {}:
+                return main.ERROR
+            output = ""
+            # Is the number of switches what we expected?
+            devices = topology.get( 'devices', False )
+            links = topology.get( 'links', False )
+            if devices is False or links is False:
+                return main.ERROR
+            switchCheck = ( int( devices ) == int( numoswitch ) )
+            # Is the number of links what we expected?
+            linkCheck = ( int( links ) == int( numolink ) )
+            if ( switchCheck and linkCheck ):
+                # We expected the correct numbers
+                output += "The number of links and switches match " +\
+                          "what was expected"
+                result = main.TRUE
+            else:
+                output += "The number of links and switches does not match " +\
+                          "what was expected"
+                result = main.FALSE
+            output = output + "\n ONOS sees %i devices (%i expected) " \
+                              "and %i links (%i expected)" % (
+                int( devices ), int( numoswitch ), int( links ),
+                int( numolink ) )
+            if logLevel == "report":
+                main.log.report( output )
+            elif logLevel == "warn":
+                main.log.warn( output )
+            else:
+                main.log.info( self.name + ": " + output )
+            return result
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def deviceRole( self, deviceId, onosNode, role="master" ):
+        """
+        Calls the device-role cli command.
+        deviceId must be the id of a device as seen in the onos devices command
+        onosNode is the ip of one of the onos nodes in the cluster
+        role must be either master, standby, or none
+
+        Returns:
+            main.TRUE or main.FALSE based on argument verification, and
+            main.ERROR if the command returns an error
+        """
+        try:
+            if role.lower() == "master" or role.lower() == "standby" or\
+                    role.lower() == "none":
+                cmdStr = "device-role " +\
+                    str( deviceId ) + " " +\
+                    str( onosNode ) + " " +\
+                    str( role )
+                handle = self.sendline( cmdStr )
+                assert "Command not found:" not in handle, handle
+                if re.search( "Error", handle ):
+                    # end color output to escape any colours
+                    # from the cli
+                    main.log.error( self.name + ": " +
+                                    handle + '\033[0m' )
+                    return main.ERROR
+                return main.TRUE
+            else:
+                main.log.error( "Invalid 'role' given to device_role(). " +
+                                "Value was '" + str(role) + "'." )
+                return main.FALSE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def clusters( self, jsonFormat=True ):
+        """
+        Lists all clusters
+        Optional argument:
+            * jsonFormat - boolean indicating if you want output in json
+        """
+        try:
+            cmdStr = "clusters"
+            if jsonFormat:
+                cmdStr += " -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def electionTestLeader( self ):
+        """
+        CLI command to get the current leader for the Election test application
+        NOTE: Requires installation of the onos-app-election feature
+        Returns: Node IP of the leader if one exists
+                 None if none exists
+                 main.FALSE on error
+        """
+        try:
+            cmdStr = "election-test-leader"
+            response = self.sendline( cmdStr )
+            assert "Command not found:" not in response, response
+            # Leader
+            leaderPattern = "The\scurrent\sleader\sfor\sthe\sElection\s" +\
+                "app\sis\s(?P<node>.+)\."
+            nodeSearch = re.search( leaderPattern, response )
+            if nodeSearch:
+                node = nodeSearch.group( 'node' )
+                main.log.info( "Election-test-leader on " + str( self.name ) +
+                               " found " + node + " as the leader" )
+                return node
+            # no leader
+            nullPattern = "There\sis\scurrently\sno\sleader\selected\sfor\s" +\
+                "the\sElection\sapp"
+            nullSearch = re.search( nullPattern, response )
+            if nullSearch:
+                main.log.info( "Election-test-leader found no leader on " +
+                               self.name )
+                return None
+            # error
+            errorPattern = "Command\snot\sfound"
+            if re.search( errorPattern, response ):
+                main.log.error( "Election app is not loaded on " + self.name )
+                # TODO: Should this be main.ERROR?
+                return main.FALSE
+            else:
+                main.log.error( "Error in electionTestLeader on " + self.name +
+                                ": " + "unexpected response" )
+                main.log.error( repr( response ) )
+                return main.FALSE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def electionTestRun( self ):
+        """
+        CLI command to run for leadership of the Election test application.
+        NOTE: Requires installation of the onos-app-election feature
+        Returns: main.TRUE on success
+                 main.FALSE on error
+        """
+        try:
+            cmdStr = "election-test-run"
+            response = self.sendline( cmdStr )
+            assert "Command not found:" not in response, response
+            # success
+            successPattern = "Entering\sleadership\selections\sfor\sthe\s" +\
+                "Election\sapp."
+            search = re.search( successPattern, response )
+            if search:
+                main.log.info( self.name + " entering leadership elections " +
+                               "for the Election app." )
+                return main.TRUE
+            # error
+            errorPattern = "Command\snot\sfound"
+            if re.search( errorPattern, response ):
+                main.log.error( "Election app is not loaded on " + self.name )
+                return main.FALSE
+            else:
+                main.log.error( "Error in electionTestRun on " + self.name +
+                                ": " + "unexpected response" )
+                main.log.error( repr( response ) )
+                return main.FALSE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def electionTestWithdraw( self ):
+        """
+        CLI command to withdraw the local node from leadership election for
+        the Election test application.
+        NOTE: Requires installation of the onos-app-election feature
+        Returns: main.TRUE on success
+                 main.FALSE on error
+        """
+        try:
+            cmdStr = "election-test-withdraw"
+            response = self.sendline( cmdStr )
+            assert "Command not found:" not in response, response
+            # success
+            successPattern = "Withdrawing\sfrom\sleadership\selections\sfor" +\
+                "\sthe\sElection\sapp."
+            if re.search( successPattern, response ):
+                main.log.info( self.name + " withdrawing from leadership " +
+                               "elections for the Election app." )
+                return main.TRUE
+            # error
+            errorPattern = "Command\snot\sfound"
+            if re.search( errorPattern, response ):
+                main.log.error( "Election app is not loaded on " + self.name )
+                return main.FALSE
+            else:
+                main.log.error( "Error in electionTestWithdraw on " +
+                                self.name + ": " + "unexpected response" )
+                main.log.error( repr( response ) )
+                return main.FALSE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def getDevicePortsEnabledCount( self, dpid ):
+        """
+        Get the count of all enabled ports on a particular device/switch
+        """
+        try:
+            dpid = str( dpid )
+            cmdStr = "onos:ports -e " + dpid + " | wc -l"
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            if re.search( "No such device", output ):
+                main.log.error( "Error in getting ports" )
+                return ( output, "Error" )
+            else:
+                return output
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return ( output, "Error" )
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def getDeviceLinksActiveCount( self, dpid ):
+        """
+        Get the count of all active links on a particular device/switch
+        """
+        try:
+            dpid = str( dpid )
+            cmdStr = "onos:links " + dpid + " | grep ACTIVE | wc -l"
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            if re.search( "No such device", output ):
+                main.log.error( "Error in getting ports " )
+                return ( output, "Error " )
+            else:
+                return output
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return ( output, "Error " )
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def getAllIntentIds( self ):
+        """
+        Return a list of all Intent IDs
+        """
+        try:
+            cmdStr = "onos:intents | grep id="
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            if re.search( "Error", output ):
+                main.log.error( "Error in getting ports" )
+                return ( output, "Error" )
+            else:
+                return output
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return ( output, "Error" )
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def intentSummary( self ):
+        """
+        Returns a dictionary containing the current intent states and the count
+        """
+        try:
+            intents = self.intents( )
+            states = []
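+            # Collect each intent's state, then tally how many intents are in each state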
+            for intent in json.loads( intents ):
+                states.append( intent.get( 'state', None ) )
+            out = [ ( i, states.count( i ) ) for i in set( states ) ]
+            main.log.info( dict( out ) )
+            return dict( out )
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, intents ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def leaders( self, jsonFormat=True ):
+        """
+        Returns the output of the leaders command.
+        Optional argument:
+            * jsonFormat - boolean indicating if you want output in json
+        """
+        try:
+            cmdStr = "onos:leaders"
+            if jsonFormat:
+                cmdStr += " -j"
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            return output
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def leaderCandidates( self, jsonFormat=True ):
+        """
+        Returns the output of the leaders -c command.
+        Optional argument:
+            * jsonFormat - boolean indicating if you want output in json
+        """
+        try:
+            cmdStr = "onos:leaders -c"
+            if jsonFormat:
+                cmdStr += " -j"
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            return output
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def specificLeaderCandidate( self, topic ):
+        """
+        Returns a list in the format [leader,candidate1,candidate2,...] for a
+        given topic, or an empty list if the topic doesn't exist.
+        If no leader is elected, the leader entry in the returned list will be
+        "none".
+        Returns None if there is a type error processing the json object.
+        """
+        try:
+            cmdStr = "onos:leaders -j"
+            rawOutput = self.sendline( cmdStr )
+            assert "Command not found:" not in rawOutput, rawOutput
+            output = json.loads( rawOutput )
+            results = []
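+            # Each record's "candidates" field is a bracketed string (e.g. "[a, b]"), so strip the brackets and split on ", "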
+            for record in output:
+                if record["topic"] == topic:
+                    leader = record["leader"]
+                    candidates = re.split( ", ", record["candidates"][1:-1] )
+                    results.append( leader )
+                    results.extend( candidates )
+            return results
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawOutput ) )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def pendingMap( self, jsonFormat=True ):
+        """
+        Returns the output of the intent Pending map.
+        """
+        try:
+            cmdStr = "onos:intents -p"
+            if jsonFormat:
+                cmdStr += " -j"
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            return output
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def partitions( self, jsonFormat=True ):
+        """
+        Returns the output of the raft partitions command for ONOS.
+        """
+        # Sample JSON
+        # {
+        #     "leader": "tcp://10.128.30.11:7238",
+        #     "members": [
+        #         "tcp://10.128.30.11:7238",
+        #         "tcp://10.128.30.17:7238",
+        #         "tcp://10.128.30.13:7238"
+        #     ],
+        #     "name": "p1",
+        #     "term": 3
+        # },
+        try:
+            cmdStr = "onos:partitions"
+            if jsonFormat:
+                cmdStr += " -j"
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            return output
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def apps( self, jsonFormat=True ):
+        """
+        Returns the output of the apps command for ONOS. This command lists
+        information about installed ONOS applications
+        """
+        # Sample JSON object
+        # [{"name":"org.onosproject.openflow","id":0,"version":"1.2.0",
+        # "description":"ONOS OpenFlow protocol southbound providers",
+        # "origin":"ON.Lab","permissions":"[]","featuresRepo":"",
+        # "features":"[onos-openflow]","state":"ACTIVE"}]
+        try:
+            cmdStr = "onos:apps"
+            if jsonFormat:
+                cmdStr += " -j"
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            assert "Error executing command" not in output, output
+            return output
+        # FIXME: look at specific exceptions/Errors
+        except AssertionError:
+            main.log.exception( "Error in processing onos:app command." )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def appStatus( self, appName ):
+        """
+        Uses the onos:apps cli command to return the status of an application.
+        Returns:
+            "ACTIVE" - If app is installed and activated
+            "INSTALLED" - If app is installed and deactivated
+            "UNINSTALLED" - If app is not installed
+            None - on error
+        """
+        try:
+            if not isinstance( appName, types.StringType ):
+                main.log.error( self.name + ".appStatus(): appName must be" +
+                                " a string" )
+                return None
+            output = self.apps( jsonFormat=True )
+            appsJson = json.loads( output )
+            state = None
+            for app in appsJson:
+                if appName == app.get('name'):
+                    state = app.get('state')
+                    break
+            if state == "ACTIVE" or state == "INSTALLED":
+                return state
+            elif state is None:
+                return "UNINSTALLED"
+            elif state:
+                main.log.error( "Unexpected state from 'onos:apps': " +
+                                str( state ) )
+                return state
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, output ) )
+            main.stop()
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def app( self, appName, option ):
+        """
+        Interacts with the app command for ONOS. This command manages
+        application inventory.
+        """
+        try:
+            # Validate argument types
+            valid = True
+            if not isinstance( appName, types.StringType ):
+                main.log.error( self.name + ".app(): appName must be a " +
+                                "string" )
+                valid = False
+            if not isinstance( option, types.StringType ):
+                main.log.error( self.name + ".app(): option must be a string" )
+                valid = False
+            if not valid:
+                return main.FALSE
+            # Validate Option
+            option = option.lower()
+            # NOTE: Install may become a valid option
+            if option == "activate":
+                pass
+            elif option == "deactivate":
+                pass
+            elif option == "uninstall":
+                pass
+            else:
+                # Invalid option
+                main.log.error( "The ONOS app command argument only takes " +
+                                "the values: (activate|deactivate|uninstall)" +
+                                "; was given '" + option + "'")
+                return main.FALSE
+            cmdStr = "onos:app " + option + " " + appName
+            output = self.sendline( cmdStr )
+            if "Error executing command" in output:
+                main.log.error( "Error in processing onos:app command: " +
+                                str( output ) )
+                return main.FALSE
+            elif "No such application" in output:
+                main.log.error( "The application '" + appName +
+                                "' is not installed in ONOS" )
+                return main.FALSE
+            elif "Command not found:" in output:
+                main.log.error( "Error in processing onos:app command: " +
+                                str( output ) )
+                return main.FALSE
+            elif "Unsupported command:" in output:
+                main.log.error( "Incorrect command given to 'app': " +
+                                str( output ) )
+            # NOTE: we may need to add more checks here
+            # else: Command was successful
+            # main.log.debug( "app response: " + repr( output ) )
+            return main.TRUE
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.ERROR
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def activateApp( self, appName, check=True ):
+        """
+        Activate an app that is already installed in ONOS
+        appName is the hierarchical app name, not the feature name
+        If check is True, method will check the status of the app after the
+        command is issued
+        Returns main.TRUE if the command was successfully sent
+                main.FALSE if the cli responded with an error or given
+                    incorrect input
+        """
+        try:
+            if not isinstance( appName, types.StringType ):
+                main.log.error( self.name + ".activateApp(): appName must be" +
+                                " a string" )
+                return main.FALSE
+            status = self.appStatus( appName )
+            if status == "INSTALLED":
+                response = self.app( appName, "activate" )
+                if check and response == main.TRUE:
+                    for i in range(10):  # try 10 times then give up
+                        status = self.appStatus( appName )
+                        if status == "ACTIVE":
+                            return main.TRUE
+                        else:
+                            main.log.debug( "The state of application " +
+                                            appName + " is " + status )
+                            time.sleep( 1 )
+                    return main.FALSE
+                else:  # not 'check' or command didn't succeed
+                    return response
+            elif status == "ACTIVE":
+                return main.TRUE
+            elif status == "UNINSTALLED":
+                main.log.error( self.name + ": Tried to activate the " +
+                                "application '" + appName + "' which is not " +
+                                "installed." )
+            else:
+                main.log.error( "Unexpected return value from appStatus: " +
+                                str( status ) )
+                return main.ERROR
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.ERROR
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def deactivateApp( self, appName, check=True ):
+        """
+        Deactivate an app that is already activated in ONOS
+        appName is the hierarchical app name, not the feature name
+        If check is True, method will check the status of the app after the
+        command is issued
+        Returns main.TRUE if the command was successfully sent
+                main.FALSE if the cli responded with an error or given
+                    incorrect input
+        """
+        try:
+            if not isinstance( appName, types.StringType ):
+                main.log.error( self.name + ".deactivateApp(): appName must " +
+                                "be a string" )
+                return main.FALSE
+            status = self.appStatus( appName )
+            if status == "INSTALLED":
+                return main.TRUE
+            elif status == "ACTIVE":
+                response = self.app( appName, "deactivate" )
+                if check and response == main.TRUE:
+                    for i in range(10):  # try 10 times then give up
+                        status = self.appStatus( appName )
+                        if status == "INSTALLED":
+                            return main.TRUE
+                        else:
+                            time.sleep( 1 )
+                    return main.FALSE
+                else:  # not check or command didn't succeed
+                    return response
+            elif status == "UNINSTALLED":
+                main.log.warn( self.name + ": Tried to deactivate the " +
+                                "application '" + appName + "' which is not " +
+                                "installed." )
+                return main.TRUE
+            else:
+                main.log.error( "Unexpected return value from appStatus: " +
+                                str( status ) )
+                return main.ERROR
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.ERROR
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def uninstallApp( self, appName, check=True ):
+        """
+        Uninstall an app that is already installed in ONOS
+        appName is the hierarchical app name, not the feature name
+        If check is True, method will check the status of the app after the
+        command is issued
+        Returns main.TRUE if the command was successfully sent
+                main.FALSE if the cli responded with an error or given
+                    incorrect input
+        """
+        # TODO: check with Thomas about the state machine for apps
+        try:
+            if not isinstance( appName, types.StringType ):
+                main.log.error( self.name + ".uninstallApp(): appName must " +
+                                "be a string" )
+                return main.FALSE
+            status = self.appStatus( appName )
+            if status == "INSTALLED":
+                response = self.app( appName, "uninstall" )
+                if check and response == main.TRUE:
+                    for i in range(10):  # try 10 times then give up
+                        status = self.appStatus( appName )
+                        if status == "UNINSTALLED":
+                            return main.TRUE
+                        else:
+                            time.sleep( 1 )
+                    return main.FALSE
+                else:  # not check or command didn't succeed
+                    return response
+            elif status == "ACTIVE":
+                main.log.warn( self.name + ": Tried to uninstall the " +
+                                "application '" + appName + "' which is " +
+                                "currently active." )
+                response = self.app( appName, "uninstall" )
+                if check and response == main.TRUE:
+                    for i in range(10):  # try 10 times then give up
+                        status = self.appStatus( appName )
+                        if status == "UNINSTALLED":
+                            return main.TRUE
+                        else:
+                            time.sleep( 1 )
+                    return main.FALSE
+                else:  # not check or command didn't succeed
+                    return response
+            elif status == "UNINSTALLED":
+                return main.TRUE
+            else:
+                main.log.error( "Unexpected return value from appStatus: " +
+                                str( status ) )
+                return main.ERROR
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.ERROR
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def appIDs( self, jsonFormat=True ):
+        """
+        Show the mappings between app id and app names given by the 'app-ids'
+        cli command
+        """
+        try:
+            cmdStr = "app-ids"
+            if jsonFormat:
+                cmdStr += " -j"
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            assert "Error executing command" not in output, output
+            return output
+        except AssertionError:
+            main.log.exception( "Error in processing onos:app-ids command." )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def appToIDCheck( self ):
+        """
+        This method will check that each application's ID listed in 'apps' is
+        the same as the ID listed in 'app-ids'. The check also verifies that
+        there are no duplicate IDs issued. Note that an app ID should be
+        a globally unique numerical identifier for app/app-like features. Once
+        an ID is registered, the ID is never freed up so that if an app is
+        reinstalled it will have the same ID.
+
+        Returns: main.TRUE  if the check passes and
+                 main.FALSE if the check fails or
+                 main.ERROR if there is some error in processing the test
+        """
+        try:
+            bail = False
+            rawJson = self.appIDs( jsonFormat=True )
+            if rawJson:
+                ids = json.loads( rawJson )
+            else:
+                main.log.error( "app-ids returned nothing:" + repr( rawJson ) )
+                bail = True
+            rawJson = self.apps( jsonFormat=True )
+            if rawJson:
+                apps = json.loads( rawJson )
+            else:
+                main.log.error( "apps returned nothing:" + repr( rawJson ) )
+                bail = True
+            if bail:
+                return main.FALSE
+            result = main.TRUE
+            for app in apps:
+                appID = app.get( 'id' )
+                if appID is None:
+                    main.log.error( "Error parsing app: " + str( app ) )
+                    result = main.FALSE
+                appName = app.get( 'name' )
+                if appName is None:
+                    main.log.error( "Error parsing app: " + str( app ) )
+                    result = main.FALSE
+                # get the entry in ids that has the same appID
+                current = filter( lambda item: item[ 'id' ] == appID, ids )
+                # main.log.debug( "Comparing " + str( app ) + " to " +
+                #                 str( current ) )
+                if not current:  # if ids doesn't have this id
+                    result = main.FALSE
+                    main.log.error( "'app-ids' does not have the ID for " +
+                                    str( appName ) + " that apps does." )
+                elif len( current ) > 1:
+                    # there is more than one app with this ID
+                    result = main.FALSE
+                    # We will log this later in the method
+                elif not current[0][ 'name' ] == appName:
+                    currentName = current[0][ 'name' ]
+                    result = main.FALSE
+                    main.log.error( "'app-ids' has " + str( currentName ) +
+                                    " registered under id:" + str( appID ) +
+                                    " but 'apps' has " + str( appName ) )
+                else:
+                    pass  # id and name match!
+            # now make sure that app-ids has no duplicates
+            idsList = []
+            namesList = []
+            for item in ids:
+                idsList.append( item[ 'id' ] )
+                namesList.append( item[ 'name' ] )
+            if len( idsList ) != len( set( idsList ) ) or\
+               len( namesList ) != len( set( namesList ) ):
+                    main.log.error( "'app-ids' has some duplicate entries: \n"
+                                    + json.dumps( ids,
+                                                  sort_keys=True,
+                                                  indent=4,
+                                                  separators=( ',', ': ' ) ) )
+                    result = main.FALSE
+            return result
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, rawJson ) )
+            return main.ERROR
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def getCfg( self, component=None, propName=None, short=False,
+                jsonFormat=True ):
+        """
+        Get configuration settings from onos cli
+        Optional arguments:
+            component - Optionally only list configurations for a specific
+                        component. If None, all components with configurations
+                        are displayed. Case Sensitive string.
+            propName - If component is specified, propName option will show
+                       only this specific configuration from that component.
+                       Case Sensitive string.
+            jsonFormat - Returns output as json. Note that this will override
+                         the short option
+            short - Short, less verbose, version of configurations.
+                    This is overridden by the json option
+        returns:
+            Output from cli as a string or None on error
+        """
+        try:
+            baseStr = "cfg"
+            cmdStr = " get"
+            componentStr = ""
+            if component:
+                componentStr += " " + component
+                if propName:
+                    componentStr += " " + propName
+            if jsonFormat:
+                baseStr += " -j"
+            elif short:
+                baseStr += " -s"
+            output = self.sendline( baseStr + cmdStr + componentStr )
+            assert "Command not found:" not in output, output
+            assert "Error executing command" not in output, output
+            return output
+        except AssertionError:
+            main.log.exception( "Error in processing 'cfg get' command." )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def setCfg( self, component, propName, value=None, check=True ):
+        """
+        Set/Unset configuration settings from ONOS cli
+        Required arguments:
+            component - The case sensitive name of the component whose
+                        property is to be set
+            propName - The case sensitive name of the property to be set/unset
+        Optional arguments:
+            value - The value to set the property to. If None, will unset the
+                    property and revert it to its default value (if applicable)
+            check - Boolean; check whether the option was successfully set.
+                    This only applies when a value is given.
+        returns:
+            main.TRUE on success or main.FALSE on failure. If check is False,
+            will return main.TRUE unless there is an error
+        """
+        try:
+            baseStr = "cfg"
+            cmdStr = " set " + str( component ) + " " + str( propName )
+            if value is not None:
+                cmdStr += " " + str( value )
+            output = self.sendline( baseStr + cmdStr )
+            assert "Command not found:" not in output, output
+            assert "Error executing command" not in output, output
+            if value and check:
+                results = self.getCfg( component=str( component ),
+                                       propName=str( propName ),
+                                       jsonFormat=True )
+                # Check if current value is what we just set
+                try:
+                    jsonOutput = json.loads( results )
+                    current = jsonOutput[ 'value' ]
+                except ( TypeError, ValueError ):
+                    main.log.exception( "Error parsing cfg output" )
+                    main.log.error( "output:" + repr( results ) )
+                    return main.FALSE
+                if current == str( value ):
+                    return main.TRUE
+                return main.FALSE
+            return main.TRUE
+        except AssertionError:
+            main.log.exception( "Error in processing 'cfg set' command." )
+            return main.FALSE
+        except ( TypeError, ValueError ):
+            main.log.exception( "{}: Object not as expected: {!r}".format( self.name, results ) )
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def setTestAdd( self, setName, values ):
+        """
+        CLI command to add elements to a distributed set.
+        Arguments:
+            setName - The name of the set to add to.
+            values - The value(s) to add to the set, space separated.
+        Example usages:
+            setTestAdd( "set1", "a b c" )
+            setTestAdd( "set2", "1" )
+        returns:
+            main.TRUE on success OR
+            main.FALSE if elements were already in the set OR
+            main.ERROR on error
+        """
+        try:
+            cmdStr = "set-test-add " + str( setName ) + " " + str( values )
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            try:
+                # TODO: Maybe make this less hardcoded
+                # ConsistentMap Exceptions
+                assert "org.onosproject.store.service" not in output
+                # Node not leader
+                assert "java.lang.IllegalStateException" not in output
+            except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr + "' " +
+                                "command: " + str( output ) )
+                retryTime = 30  # Conservative time, given by Madan
+                main.log.info( "Waiting " + str( retryTime ) +
+                               "seconds before retrying." )
+                time.sleep( retryTime )  # Due to change in mastership
+                output = self.sendline( cmdStr )
+            assert "Error executing command" not in output
+            positiveMatch = "\[(.*)\] was added to the set " + str( setName )
+            negativeMatch = "\[(.*)\] was already in set " + str( setName )
+            main.log.info( self.name + ": " + output )
+            if re.search( positiveMatch, output):
+                return main.TRUE
+            elif re.search( negativeMatch, output):
+                return main.FALSE
+            else:
+                main.log.error( self.name + ": setTestAdd did not" +
+                                " match expected output" )
+                main.log.debug( self.name + " actual: " + repr( output ) )
+                return main.ERROR
+        except AssertionError:
+            main.log.exception( "Error in processing '" + cmdStr + "' command. " )
+            return main.ERROR
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.ERROR
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def setTestRemove( self, setName, values, clear=False, retain=False ):
+        """
+        CLI command to remove elements from a distributed set.
+        Required arguments:
+            setName - The name of the set to remove from.
+            values - The value(s) to remove from the set, space separated.
+        Optional arguments:
+            clear - Clear all elements from the set
+            retain - Retain only the given values. (intersection of the
+                     original set and the given set)
+        returns:
+            main.TRUE on success OR
+            main.FALSE if the set was not changed OR
+            main.ERROR on error
+        """
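+        # Illustrative usage (hypothetical set name and values; assumes this
+        # driver instance is connected to a running ONOS CLI):
+        #   setTestRemove( "set1", "a b" )               # remove two elements
+        #   setTestRemove( "set1", "", clear=True )      # clear the whole set
+        #   setTestRemove( "set1", "b c", retain=True )  # keep only b and c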
+        try:
+            cmdStr = "set-test-remove "
+            if clear:
+                cmdStr += "-c " + str( setName )
+            elif retain:
+                cmdStr += "-r " + str( setName ) + " " + str( values )
+            else:
+                cmdStr += str( setName ) + " " + str( values )
+            output = self.sendline( cmdStr )
+            try:
+                # TODO: Maybe make this less hardcoded
+                # ConsistentMap Exceptions
+                assert "org.onosproject.store.service" not in output
+                # Node not leader
+                assert "java.lang.IllegalStateException" not in output
+            except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr + "' " +
+                                "command: " + str( output ) )
+                retryTime = 30  # Conservative time, given by Madan
+                main.log.info( "Waiting " + str( retryTime ) +
+                               " seconds before retrying." )
+                time.sleep( retryTime )  # Due to change in mastership
+                output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            assert "Error executing command" not in output, output
+            main.log.info( self.name + ": " + output )
+            if clear:
+                pattern = "Set " + str( setName ) + " cleared"
+                if re.search( pattern, output ):
+                    return main.TRUE
+            elif retain:
+                positivePattern = str( setName ) + " was pruned to contain " +\
+                                  "only elements of set \[(.*)\]"
+                negativePattern = str( setName ) + " was not changed by " +\
+                                  "retaining only elements of the set " +\
+                                  "\[(.*)\]"
+                if re.search( positivePattern, output ):
+                    return main.TRUE
+                elif re.search( negativePattern, output ):
+                    return main.FALSE
+            else:
+                positivePattern = "\[(.*)\] was removed from the set " +\
+                                  str( setName )
+                if ( len( values.split() ) == 1 ):
+                    negativePattern = "\[(.*)\] was not in set " +\
+                                      str( setName )
+                else:
+                    negativePattern = "No element of \[(.*)\] was in set " +\
+                                      str( setName )
+                if re.search( positivePattern, output ):
+                    return main.TRUE
+                elif re.search( negativePattern, output ):
+                    return main.FALSE
+            main.log.error( self.name + ": setTestRemove did not" +
+                            " match expected output" )
+            # NOTE: 'pattern' is only defined for the clear branch above, so
+            # log just the actual output here to avoid a NameError.
+            main.log.debug( self.name + " actual: " + repr( output ) )
+            return main.ERROR
+        except AssertionError:
+            main.log.exception( "Error in processing '" + cmdStr + "' command." )
+            return main.ERROR
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.ERROR
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def setTestGet( self, setName, values="" ):
+        """
+        CLI command to get the elements in a distributed set.
+        Required arguments:
+            setName - The name of the set to get.
+        Optional arguments:
+            values - The value(s) to check for in the set, space separated.
+        returns:
+            main.ERROR on error OR
+            A list of elements in the set if no optional arguments are
+                supplied OR
+            A tuple containing the list then:
+                main.FALSE if the given values are not in the set OR
+                main.TRUE if the given values are in the set
+        """
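+        # Illustrative usage (hypothetical set name; the return shape depends
+        # on whether 'values' is supplied):
+        #   elements = setTestGet( "set1" )              # -> list of elements
+        #   elements, found = setTestGet( "set1", "a" )  # -> ( list, main.TRUE/FALSE )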
+        try:
+            values = str( values ).strip()
+            setName = str( setName ).strip()
+            length = len( values.split() )
+            containsCheck = None
+            # Patterns to match
+            setPattern = "\[(.*)\]"
+            pattern = "Items in set " + setName + ":\n" + setPattern
+            containsTrue = "Set " + setName + " contains the value " + values
+            containsFalse = "Set " + setName + " did not contain the value " +\
+                            values
+            containsAllTrue = "Set " + setName + " contains the the subset " +\
+                              setPattern
+            containsAllFalse = "Set " + setName + " did not contain the the" +\
+                               " subset " + setPattern
+
+            cmdStr = "set-test-get "
+            cmdStr += setName + " " + values
+            output = self.sendline( cmdStr )
+            try:
+                # TODO: Maybe make this less hardcoded
+                # ConsistentMap Exceptions
+                assert "org.onosproject.store.service" not in output
+                # Node not leader
+                assert "java.lang.IllegalStateException" not in output
+            except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr + "' " +
+                                "command: " + str( output ) )
+                retryTime = 30  # Conservative time, given by Madan
+                main.log.info( "Waiting " + str( retryTime ) +
+                               " seconds before retrying." )
+                time.sleep( retryTime )  # Due to change in mastership
+                output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            assert "Error executing command" not in output, output
+            main.log.info( self.name + ": " + output )
+
+            if length == 0:
+                match = re.search( pattern, output )
+            else:  # if given values
+                if length == 1:  # Contains output
+                    patternTrue = pattern + "\n" + containsTrue
+                    patternFalse = pattern + "\n" + containsFalse
+                else:  # ContainsAll output
+                    patternTrue = pattern + "\n" + containsAllTrue
+                    patternFalse = pattern + "\n" + containsAllFalse
+                matchTrue = re.search( patternTrue, output )
+                matchFalse = re.search( patternFalse, output )
+                if matchTrue:
+                    containsCheck = main.TRUE
+                    match = matchTrue
+                elif matchFalse:
+                    containsCheck = main.FALSE
+                    match = matchFalse
+                else:
+                    main.log.error( self.name + " setTestGet did not match " +\
+                                    "expected output" )
+                    main.log.debug( self.name + " expected: " + pattern )
+                    main.log.debug( self.name + " actual: " + repr( output ) )
+                    match = None
+            if match:
+                setMatch = match.group( 1 )
+                if setMatch == '':
+                    setList = []
+                else:
+                    setList = setMatch.split( ", " )
+                if length > 0:
+                    return ( setList, containsCheck )
+                else:
+                    return setList
+            else:  # no match
+                main.log.error( self.name + ": setTestGet did not" +
+                                " match expected output" )
+                main.log.debug( self.name + " expected: " + pattern )
+                main.log.debug( self.name + " actual: " + repr( output ) )
+                return main.ERROR
+        except AssertionError:
+            main.log.exception( "Error in processing '" + cmdStr + "' command." )
+            return main.ERROR
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.ERROR
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def setTestSize( self, setName ):
+        """
+        CLI command to get the number of elements in a distributed set.
+        Required arguments:
+            setName - The name of the set to get the size of.
+        returns:
+            The integer value of the size returned or
+            None on error
+        """
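+        # Illustrative usage (hypothetical set name):
+        #   size = setTestSize( "set1" )  # -> integer size, or None on error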
+        try:
+            # TODO: Should this check against the number of elements returned
+            #       and then return true/false based on that?
+            setName = str( setName ).strip()
+            # Patterns to match
+            setPattern = "\[(.*)\]"
+            pattern = "There are (\d+) items in set " + setName + ":\n" +\
+                          setPattern
+            cmdStr = "set-test-get -s "
+            cmdStr += setName
+            output = self.sendline( cmdStr )
+            try:
+                # TODO: Maybe make this less hardcoded
+                # ConsistentMap Exceptions
+                assert "org.onosproject.store.service" not in output
+                # Node not leader
+                assert "java.lang.IllegalStateException" not in output
+            except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr + "' " +
+                                "command: " + str( output ) )
+                retryTime = 30  # Conservative time, given by Madan
+                main.log.info( "Waiting " + str( retryTime ) +
+                               " seconds before retrying." )
+                time.sleep( retryTime )  # Due to change in mastership
+                output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            assert "Error executing command" not in output, output
+            main.log.info( self.name + ": " + output )
+            match = re.search( pattern, output )
+            if match:
+                setSize = int( match.group( 1 ) )
+                setMatch = match.group( 2 )
+                if len( setMatch.split() ) == setSize:
+                    main.log.info( "The size returned by " + self.name +
+                                   " matches the number of elements in " +
+                                   "the returned set" )
+                else:
+                    main.log.error( "The size returned by " + self.name +
+                                    " does not match the number of " +
+                                    "elements in the returned set." )
+                return setSize
+            else:  # no match
+                main.log.error( self.name + ": setTestSize did not" +
+                                " match expected output" )
+                main.log.debug( self.name + " expected: " + pattern )
+                main.log.debug( self.name + " actual: " + repr( output ) )
+                return None
+        except AssertionError:
+            main.log.exception( "Error in processing '" + cmdStr + "' command." )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def counters( self, jsonFormat=True ):
+        """
+        Command to list the various counters in the system.
+        returns:
+            if jsonFormat, a string of the json object returned by the cli
+            command
+            if not jsonFormat, the normal string output of the cli command
+            None on error
+        """
+        try:
+            counters = {}
+            cmdStr = "counters"
+            if jsonFormat:
+                cmdStr += " -j"
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            assert "Error executing command" not in output, output
+            main.log.info( self.name + ": " + output )
+            return output
+        except AssertionError:
+            main.log.exception( "Error in processing 'counters' command." )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def counterTestAddAndGet( self, counter, delta=1, inMemory=False ):
+        """
+        CLI command to add a delta to then get a distributed counter.
+        Required arguments:
+            counter - The name of the counter to increment.
+        Optional arguments:
+            delta - The long to add to the counter
+            inMemory - use in memory map for the counter
+        returns:
+            integer value of the counter or
+            None on Error
+        """
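+        # Illustrative usage (hypothetical counter name):
+        #   newValue = counterTestAddAndGet( "testCounter" )           # add 1
+        #   newValue = counterTestAddAndGet( "testCounter", delta=5 )  # add 5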
+        try:
+            counter = str( counter )
+            delta = int( delta )
+            cmdStr = "counter-test-increment "
+            if inMemory:
+                cmdStr += "-i "
+            cmdStr += counter
+            if delta != 1:
+                cmdStr += " " + str( delta )
+            output = self.sendline( cmdStr )
+            try:
+                # TODO: Maybe make this less hardcoded
+                # ConsistentMap Exceptions
+                assert "org.onosproject.store.service" not in output
+                # Node not leader
+                assert "java.lang.IllegalStateException" not in output
+            except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr + "' " +
+                                "command: " + str( output ) )
+                retryTime = 30  # Conservative time, given by Madan
+                main.log.info( "Waiting " + str( retryTime ) +
+                               " seconds before retrying." )
+                time.sleep( retryTime )  # Due to change in mastership
+                output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            assert "Error executing command" not in output, output
+            main.log.info( self.name + ": " + output )
+            pattern = counter + " was updated to (-?\d+)"
+            match = re.search( pattern, output )
+            if match:
+                return int( match.group( 1 ) )
+            else:
+                main.log.error( self.name + ": counterTestAddAndGet did not" +
+                                " match expected output." )
+                main.log.debug( self.name + " expected: " + pattern )
+                main.log.debug( self.name + " actual: " + repr( output ) )
+                return None
+        except AssertionError:
+            main.log.exception( "Error in processing '" + cmdStr + "' command." )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def counterTestGetAndAdd( self, counter, delta=1, inMemory=False ):
+        """
+        CLI command to get a distributed counter then add a delta to it.
+        Required arguments:
+            counter - The name of the counter to increment.
+        Optional arguments:
+            delta - The long to add to the counter
+            inMemory - use in memory map for the counter
+        returns:
+            integer value of the counter or
+            None on Error
+        """
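+        # Illustrative usage (hypothetical counter name); per the description
+        # above, the counter is read first and the delta applied afterwards:
+        #   value = counterTestGetAndAdd( "testCounter", delta=5 )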
+        try:
+            counter = str( counter )
+            delta = int( delta )
+            cmdStr = "counter-test-increment -g "
+            if inMemory:
+                cmdStr += "-i "
+            cmdStr += counter
+            if delta != 1:
+                cmdStr += " " + str( delta )
+            output = self.sendline( cmdStr )
+            try:
+                # TODO: Maybe make this less hardcoded
+                # ConsistentMap Exceptions
+                assert "org.onosproject.store.service" not in output
+                # Node not leader
+                assert "java.lang.IllegalStateException" not in output
+            except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr + "' " +
+                                "command: " + str( output ) )
+                retryTime = 30  # Conservative time, given by Madan
+                main.log.info( "Waiting " + str( retryTime ) +
+                               " seconds before retrying." )
+                time.sleep( retryTime )  # Due to change in mastership
+                output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            assert "Error executing command" not in output, output
+            main.log.info( self.name + ": " + output )
+            pattern = counter + " was updated to (-?\d+)"
+            match = re.search( pattern, output )
+            if match:
+                return int( match.group( 1 ) )
+            else:
+                main.log.error( self.name + ": counterTestGetAndAdd did not" +
+                                " match expected output." )
+                main.log.debug( self.name + " expected: " + pattern )
+                main.log.debug( self.name + " actual: " + repr( output ) )
+                return None
+        except AssertionError:
+            main.log.exception( "Error in processing '" + cmdStr + "' command." )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def summary( self, jsonFormat=True ):
+        """
+        Description: Execute summary command in onos
+        Returns: json object ( summary -j ), returns main.FALSE if there is
+        no output
+
+        """
+        try:
+            cmdStr = "summary"
+            if jsonFormat:
+                cmdStr += " -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            assert "Error:" not in handle, handle
+            if not handle:
+                main.log.error( self.name + ": There is no output in " +
+                                "summary command" )
+                return main.FALSE
+            return handle
+        except AssertionError:
+            main.log.exception( "{} Error in summary output:".format( self.name ) )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def transactionalMapGet( self, keyName, inMemory=False ):
+        """
+        CLI command to get the value of a key in a consistent map using
+        transactions. This is a test function and can only get keys from the
+        test map hard coded into the cli command
+        Required arguments:
+            keyName - The name of the key to get
+        Optional arguments:
+            inMemory - use in memory map for the counter
+        returns:
+            The string value of the key or
+            None on Error
+        """
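+        # Illustrative usage (hypothetical key from the hard-coded test map):
+        #   value = transactionalMapGet( "Key1" )  # -> string value, or None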
+        try:
+            keyName = str( keyName )
+            cmdStr = "transactional-map-test-get "
+            if inMemory:
+                cmdStr += "-i "
+            cmdStr += keyName
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            try:
+                # TODO: Maybe make this less hardcoded
+                # ConsistentMap Exceptions
+                assert "org.onosproject.store.service" not in output
+                # Node not leader
+                assert "java.lang.IllegalStateException" not in output
+            except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr + "' " +
+                                "command: " + str( output ) )
+                return None
+            pattern = "Key-value pair \(" + keyName + ", (?P<value>.+)\) found."
+            if "Key " + keyName + " not found." in output:
+                return None
+            else:
+                match = re.search( pattern, output )
+                if match:
+                    return match.groupdict()[ 'value' ]
+                else:
+                    main.log.error( self.name + ": transactionalMapGet did not" +
+                                    " match expected output." )
+                    main.log.debug( self.name + " expected: " + pattern )
+                    main.log.debug( self.name + " actual: " + repr( output ) )
+                    return None
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def transactionalMapPut( self, numKeys, value, inMemory=False ):
+        """
+        CLI command to put a value into 'numKeys' number of keys in a
+        consistent map using transactions. This is a test function and can only
+        put into keys named 'Key#' of the test map hard coded into the cli command.
+        Required arguments:
+            numKeys - Number of keys to add the value to
+            value - The string value to put into the keys
+        Optional arguments:
+            inMemory - use in memory map for the counter
+        returns:
+            A dictionary whose keys are the name of the keys put into the map
+            and the values of the keys are dictionaries whose key-values are
+            'value': value put into map and optionally
+            'oldValue': Previous value in the key or
+            None on Error
+
+            Example output
+            { 'Key1': {'oldValue': 'oldTestValue', 'value': 'Testing'},
+              'Key2': {'value': 'Testing'} }
+        """
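+        # Illustrative usage (hypothetical values); see the example output
+        # above for the shape of the returned dictionary:
+        #   results = transactionalMapPut( 3, "Testing" )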
+        try:
+            numKeys = str( numKeys )
+            value = str( value )
+            cmdStr = "transactional-map-test-put "
+            if inMemory:
+                cmdStr += "-i "
+            cmdStr += numKeys + " " + value
+            output = self.sendline( cmdStr )
+            assert "Command not found:" not in output, output
+            try:
+                # TODO: Maybe make this less hardcoded
+                # ConsistentMap Exceptions
+                assert "org.onosproject.store.service" not in output
+                # Node not leader
+                assert "java.lang.IllegalStateException" not in output
+            except AssertionError:
+                main.log.error( "Error in processing '" + cmdStr + "' " +
+                                "command: " + str( output ) )
+                return None
+            newPattern = 'Created Key (?P<key>(\w)+) with value (?P<value>(.)+)\.'
+            updatedPattern = "Put (?P<value>(.)+) into key (?P<key>(\w)+)\. The old value was (?P<oldValue>(.)+)\."
+            results = {}
+            for line in output.splitlines():
+                new = re.search( newPattern, line )
+                updated = re.search( updatedPattern, line )
+                if new:
+                    results[ new.groupdict()[ 'key' ] ] = { 'value': new.groupdict()[ 'value' ] }
+                elif updated:
+                    results[ updated.groupdict()[ 'key' ] ] = { 'value': updated.groupdict()[ 'value' ],
+                                                                'oldValue': updated.groupdict()[ 'oldValue' ] }
+                else:
+                    main.log.error( self.name + ": transactionalMapPut did not" +
+                                    " match expected output." )
+                    main.log.debug( "{} expected: {!r} or {!r}".format( self.name,
+                                                                        newPattern,
+                                                                        updatedPattern ) )
+                    main.log.debug( self.name + " actual: " + repr( output ) )
+            return results
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def maps( self, jsonFormat=True ):
+        """
+        Description: Returns result of onos:maps
+        Optional:
+            * jsonFormat: enable json formatting of output
+        """
+        try:
+            cmdStr = "maps"
+            if jsonFormat:
+                cmdStr += " -j"
+            handle = self.sendline( cmdStr )
+            assert "Command not found:" not in handle, handle
+            return handle
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def getSwController( self, uri, jsonFormat=True ):
+        """
+        Description: Gets the controller information from the device
+        """
+        try:
+            cmd = "device-controllers "
+            if jsonFormat:
+                cmd += "-j "
+            response = self.sendline( cmd + uri )
+            assert "Command not found:" not in response, response
+            return response
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return None
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def setSwController( self, uri, ip, proto="tcp", port="6653", jsonFormat=True ):
+        """
+        Description: Sets the controller(s) for the specified device
+
+        Parameters:
+            Required: uri - String: The uri of the device(switch).
+                      ip - String or List: The ip address of the controller.
+                      This parameter can be formed in a couple of different ways.
+                        VALID:
+                        10.0.0.1 - just the ip address
+                        tcp:10.0.0.1 - the protocol and the ip address
+                        tcp:10.0.0.1:6653 - the protocol and port can be specified,
+                                            so that you can add controllers with different
+                                            protocols and ports
+                        INVALID:
+                        10.0.0.1:6653 - this is not supported by ONOS
+
+            Optional: proto - The type of connection, e.g. tcp or ssl. Used for any
+                              ip entry that does not already specify a protocol.
+                      port - The port number. Used for any ip entry that does not
+                             already specify a port.
+                      jsonFormat - If set, ONOS will output in json. NOTE: this is
+                                   currently not supported.
+
+        Returns: main.TRUE if ONOS returns without any errors, otherwise returns main.FALSE
+        """
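+        # Illustrative usage (hypothetical device uri and controller addresses):
+        #   setSwController( "of:0000000000000001", "10.0.0.1" )
+        #   setSwController( "of:0000000000000001",
+        #                    [ "tcp:10.0.0.1:6653", "10.0.0.2" ] )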
+        try:
+            cmd = "device-setcontrollers"
+
+            if jsonFormat:
+                cmd += " -j"
+            cmd += " " + uri
+            if isinstance( ip, str ):
+                ip = [ip]
+            for item in ip:
+                if ":" in item:
+                    sitem = item.split( ":" )
+                    if len(sitem) == 3:
+                        cmd += " " + item
+                    elif "." in sitem[1]:
+                        cmd += " {}:{}".format(item, port)
+                    else:
+                        main.log.error( "Malformed entry: " + item )
+                        raise TypeError
+                else:
+                    cmd += " {}:{}:{}".format( proto, item, port )
+            response = self.sendline( cmd )
+            assert "Command not found:" not in response, response
+            if "Error" in response:
+                main.log.error( response )
+                return main.FALSE
+            return main.TRUE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def removeDevice( self, device ):
+        '''
+        Description:
+            Remove a device from ONOS by passing the uri of the device(s).
+        Parameters:
+            device - (str or list) the id or uri of the device ex. "of:0000000000000001"
+        Returns:
+            Returns main.FALSE if an exception is thrown or an error is present
+            in the response. Otherwise, returns main.TRUE.
+        NOTE:
+            If a device cannot be removed, then this function will return main.FALSE
+        '''
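+        # Illustrative usage (hypothetical device ids):
+        #   removeDevice( "of:0000000000000001" )
+        #   removeDevice( [ "of:0000000000000001", "of:0000000000000002" ] )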
+        try:
+            if type( device ) is str:
+                device = [ device ]  # wrap a single id; list() would split the string into characters
+
+            for d in device:
+                time.sleep( 1 )
+                response = self.sendline( "device-remove {}".format( d ) )
+                assert "Command not found:" not in response, response
+                if "Error" in response:
+                    main.log.warn( "Error for device: {}\nResponse: {}".format( d, response ) )
+                    return main.FALSE
+            return main.TRUE
+        except AssertionError:
+            main.log.exception( "" )
+            return main.FALSE
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def removeHost( self, host ):
+        '''
+        Description:
+            Remove a host from ONOS by passing the id of the host(s)
+        Parameters:
+            host - (str or list) the id or mac of the host(s) ex. "00:00:00:00:00:01"
+        Returns:
+            Returns main.FALSE if an exception is thrown or an error is present
+            in the response. Otherwise, returns main.TRUE.
+        NOTE:
+            If a host cannot be removed, then this function will return main.FALSE
+        '''
+        try:
+            if type( host ) is str:
+                host = [ host ]  # wrap a single id; list() would split the string into characters
+
+            for h in host:
+                time.sleep( 1 )
+                response = self.sendline( "host-remove {}".format( h ) )
+                assert "Command not found:" not in response, response
+                if "Error" in response:
+                    main.log.warn( "Error for host: {}\nResponse: {}".format( h, response ) )
+                    return main.FALSE
+            return main.TRUE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+    def link( self, begin, end, state ):
+        '''
+        Description:
+            Bring link down or up in the null-provider.
+        params:
+            begin - (string) the device at one end of the link.
+            end - (string) the device at the other end of the link.
+            state - (string) the desired link state, passed to the null-link command.
+        returns:
+            main.TRUE if no exceptions were thrown and no Errors are
+            present in the response. Otherwise, returns main.FALSE
+        '''
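+        # Illustrative usage (hypothetical null-provider device numbers; the
+        # state string is passed straight through to the null-link command):
+        #   link( "0000000000000001", "0000000000000002", "down" )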
+        try:
+            cmd =  "null-link null:{} null:{} {}".format( begin, end, state )
+            response = self.sendline( cmd, showResponse=True )
+            assert "Command not found:" not in response, response
+            if "Error" in response or "Failure" in response:
+                main.log.error( response )
+                return main.FALSE
+            return main.TRUE
+        except AssertionError:
+            main.log.exception( "" )
+            return None
+        except TypeError:
+            main.log.exception( self.name + ": Object not as expected" )
+            return main.FALSE
+        except pexpect.EOF:
+            main.log.error( self.name + ": EOF exception found" )
+            main.log.error( self.name + ":    " + self.handle.before )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( self.name + ": Uncaught exception!" )
+            main.cleanup()
+            main.exit()
+
+if __name__ == '__main__':
+  onos_cli = OnosCliDriver(connect = False)
+  name = 'onos_cli'
+  user = 'onos'
+  passwd = 'rocks'
+  ip = '172.17.0.2'
+  options = { 'name': '{0}'.format(name), 'onosIp' : '{0}'.format(ip) }
+  onos_cli.connect(name = 'onoscli', user_name = user, pwd = passwd, ip_address = ip,
+                   port = '8101', options = options)
+  device_str = onos_cli.devices(jsonFormat = False)
+  print('Devices: %s' %device_str)
+  device_json = onos_cli.devices()
+  print('Device json: %s' %device_json)
+  routes_str = onos_cli.routes(jsonFormat = False)
+  print('Routes %s' %routes_str)
+  flows_json = onos_cli.flows(state = "ADDED")
+  print('Flows %s' %flows_json)
+  onos_cli.disconnect()
+  
diff --git a/src/test/cli/utilities.py b/src/test/cli/utilities.py
new file mode 100644
index 0000000..15320da
--- /dev/null
+++ b/src/test/cli/utilities.py
@@ -0,0 +1,350 @@
+#!/usr/bin/env python
+'''
+Created on 23-Oct-2012
+
+@authors: Anil Kumar (anilkumar.s@paxterrasolutions.com),
+          Raghav Kashyap(raghavkashyap@paxterrasolutions.com)
+
+
+
+    TestON is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 2 of the License, or
+    (at your option) any later version.
+
+    TestON is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with TestON.  If not, see <http://www.gnu.org/licenses/>.
+
+
+Utilities will take care about the basic functions like :
+   * Extended assertion,
+   * parse_args for key-value pair handling
+   * Parsing the params or topology file.
+
+'''
+import re
+from configobj import ConfigObj
+import ast
+import smtplib
+
+import email
+import os
+import email.mime.application
+import time
+import random
+from clicommon import *
+
+class Utilities:
+    '''
+       Utilities will take care about the basic functions like :
+       * Extended assertion,
+       * parse_args for key-value pair handling
+       * Parsing the params or topology file.
+    '''
+
+    def __init__(self):
+        self.wrapped = sys.modules[__name__]
+
+    def __getattr__(self, name):
+        '''
+        Invoked when an attribute is not found through the usual lookup.
+        It resolves dynamically named assert_* attributes and executes the
+        corresponding assertion, returning its result.
+        '''
+        try:
+            return getattr(self.wrapped, name)
+        except AttributeError:
+            def assertHandling(**kwargs):
+                nameVar = re.match("^assert",name,flags=0)
+                matchVar = re.match("assert(_not_|_)(equals|matches|greater|lesser)",name,flags=0)
+                notVar = 0
+                operators = ""
+
+                try :
+                    if matchVar.group(1) == "_not_" and matchVar.group(2) :
+                        notVar = 1
+                        operators = matchVar.group(2)
+                    elif matchVar.group(1) == "_" and matchVar.group(2):
+                        operators = matchVar.group(2)
+                except AttributeError:
+                    if matchVar==None and nameVar:
+                        operators ='equals'
+                result = self._assert(NOT=notVar,operator=operators,**kwargs)
+                if result == main.TRUE:
+                    main.log.info("Assertion Passed")
+                    main.STEPRESULT = main.TRUE
+                elif result == main.FALSE:
+                    main.log.warn("Assertion Failed")
+                    main.STEPRESULT = main.FALSE
+                else:
+                    main.log.error("There is an Error in Assertion")
+                    main.STEPRESULT = main.ERROR
+                return result
+            return assertHandling
+
+    def _assert (self,**assertParam):
+        '''
+        It will take the arguments :
+        expect:'Expected output'
+        actual:'Actual output'
+        onpass:'Action or string to be triggered or displayed respectively when the assert passed'
+        onfail:'Action or string to be triggered or displayed respectively when the assert failed'
+        not:'optional argument to specify the negation of each assertion type'
+        operator:'the assertion type: equals, greater, lesser or matches'
+
+        It will return the assertion result.
+
+        '''
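+        # Illustrative usage through the dynamic assert_* wrappers resolved by
+        # __getattr__ above (hypothetical step values):
+        #   utilities.assert_equals( expect=main.TRUE, actual=stepResult,
+        #                            onpass="Step passed",
+        #                            onfail="Step failed" )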
+
+        arguments = self.parse_args(["EXPECT","ACTUAL","ONPASS","ONFAIL","NOT","OPERATOR"],**assertParam)
+
+        result = 0
+        valuetype = ''
+        operation = "not "+ str(arguments["OPERATOR"]) if arguments['NOT'] and arguments['NOT'] == 1 else arguments["OPERATOR"]
+        operators = {'equals':{'STR':'==','NUM':'=='}, 'matches' : '=~', 'greater':'>' ,'lesser':'<'}
+
+        expectMatch = re.match('^\s*[+-]?0(e0)?\s*$', str(arguments["EXPECT"]), re.I+re.M)
+        if not ((not expectMatch) and (arguments["EXPECT"]==0)):
+            valuetype = 'NUM'
+        else :
+            if arguments["OPERATOR"] == 'greater' or arguments["OPERATOR"] == 'lesser':
+                main.log.error("Numeric comparison on strings is not possible")
+                return main.ERROR
+
+        valuetype = 'STR'
+        arguments["ACTUAL"] = str(arguments["ACTUAL"])
+        if arguments["OPERATOR"] != 'matches':
+            arguments["EXPECT"] = str(arguments["EXPECT"])
+
+        try :
+            opcode = operators[str(arguments["OPERATOR"])][valuetype] if arguments["OPERATOR"] == 'equals' else operators[str(arguments["OPERATOR"])]
+
+        except KeyError as e:
+            print "Key Error in assertion"
+            print e
+            return main.FALSE
+
+        if opcode == '=~':
+            try:
+                assert re.search(str(arguments["EXPECT"]),str(arguments["ACTUAL"]))
+                result = main.TRUE
+            except AssertionError:
+                try :
+                    assert re.match(str(arguments["EXPECT"]),str(arguments["ACTUAL"]))
+                    result = main.TRUE
+                except AssertionError:
+                    main.log.error("Assertion Failed")
+                    result = main.FALSE
+        else :
+            try:
+                if str(opcode)=="==":
+                    main.log.info("Verifying the Expected is equal to the actual or not using assert_equal")
+                    if (arguments["EXPECT"] == arguments["ACTUAL"]):
+                        result = main.TRUE
+                    else :
+                        result = main.FALSE
+                elif str(opcode) == ">":
+                    main.log.info("Verifying the Expected is Greater than the actual or not using assert_greater")
+                    if (ast.literal_eval(arguments["EXPECT"]) > ast.literal_eval(arguments["ACTUAL"])) :
+                        result = main.TRUE
+                    else :
+                        result = main.FALSE
+                elif str(opcode) == "<":
+                    main.log.info("Verifying the Expected is Lesser than the actual or not using assert_lesser")
+                    if (ast.literal_eval(arguments["EXPECT"]) < ast.literal_eval(arguments["ACTUAL"])):
+                        result = main.TRUE
+                    else :
+                        result = main.FALSE
+            except AssertionError:
+                main.log.error("Assertion Failed")
+                result = main.FALSE
+        result = result if result else 0
+        result = not result if arguments["NOT"] and arguments["NOT"] == 1 else result
+        resultString = ""
+        if result :
+            resultString = str(resultString) + "PASS"
+            main.log.info(arguments["ONPASS"])
+        else :
+            resultString = str(resultString) + "FAIL"
+            if not isinstance(arguments["ONFAIL"],str):
+                eval(str(arguments["ONFAIL"]))
+            else :
+                main.log.error(arguments["ONFAIL"])
+                main.log.report(arguments["ONFAIL"])
+                main.onFailMsg = arguments[ 'ONFAIL' ]
+
+        msg = arguments["ON" + str(resultString)]
+
+        if not isinstance(msg,str):
+            try:
+                eval(str(msg))
+            except SyntaxError as e:
+                print "function definition is not right"
+                print e
+
+        main.last_result = result
+        if main.stepResults[2]:
+            main.stepResults[2][-1] = result
+            try:
+                main.stepResults[3][-1] = arguments[ 'ONFAIL' ]
+            except AttributeError:
+                pass
+        else:
+            main.log.warn( "Assertion called before a test step" )
+        return result
+
+    def parse_args(self,args, **kwargs):
+        '''
+        It accepts keyword (key, value) pairs and returns them with the keys uppercased,
+        restricted to the names listed in args.
+        '''
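+        # Illustrative usage (hypothetical arguments):
+        #   parse_args( [ "EXPECT", "ACTUAL" ], expect=1, actual=2 )
+        #   # -> { 'EXPECT': 1, 'ACTUAL': 2 }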
+        newArgs = {}
+        for key,value in kwargs.iteritems():
+            if isinstance(args,list) and str.upper(key) in args:
+                for each in args:
+                    if each==str.upper(key):
+                        newArgs [str(each)] = value
+                    elif each != str.upper(key) and (newArgs.has_key(str(each)) == False ):
+                        newArgs[str(each)] = None
+
+        return newArgs
+
+    def send_mail(self):
+        # Create a text/plain message
+        msg = email.mime.Multipart.MIMEMultipart()
+        try :
+            if main.test_target:
+                sub = "Result summary of \"" + main.TEST + "\" run on component \"" +\
+                      main.test_target + "\" Version \"" +\
+                      vars( main )[main.test_target].get_version() + "\": " +\
+                      str( main.TOTAL_TC_SUCCESS ) + "% Passed"
+            else :
+                sub = "Result summary of \"" + main.TEST + "\": " +\
+                      str( main.TOTAL_TC_SUCCESS ) + "% Passed"
+        except ( KeyError, AttributeError ):
+            sub = "Result summary of \"" + main.TEST + "\": " +\
+                  str( main.TOTAL_TC_SUCCESS ) + "% Passed"
+
+        msg['Subject'] = sub
+        msg['From'] = main.sender
+        msg['To'] = main.mail
+
+        # The main body is just another attachment
+        body = email.mime.Text.MIMEText( main.logHeader + "\n" +
+                                         main.testResult)
+        msg.attach( body )
+
+        # Attachments
+        for filename in os.listdir( main.logdir ):
+            filepath = main.logdir + "/" + filename
+            fp = open( filepath, 'rb' )
+            att = email.mime.application.MIMEApplication( fp.read(),
+                                                          _subtype="" )
+            fp.close()
+            att.add_header( 'Content-Disposition',
+                            'attachment',
+                            filename=filename )
+            msg.attach( att )
+        try:
+            smtp = smtplib.SMTP( main.smtp )
+            smtp.starttls()
+            smtp.login( main.sender, main.senderPwd )
+            smtp.sendmail( msg['From'], [msg['To']], msg.as_string() )
+            smtp.quit()
+        except Exception:
+            main.log.exception( "Error sending email" )
+        return main.TRUE
+
+    def send_warning_email( self, subject=None ):
+        try:
+            if not subject:
+                subject = main.TEST + " PAUSED!"
+            # Create a text/plain message
+            msg = email.mime.Multipart.MIMEMultipart()
+
+            msg['Subject'] = subject
+            msg['From'] = main.sender
+            msg['To'] = main.mail
+
+            smtp = smtplib.SMTP( main.smtp )
+            smtp.starttls()
+            smtp.login( main.sender, main.senderPwd )
+            smtp.sendmail( msg['From'], [msg['To']], msg.as_string() )
+            smtp.quit()
+        except Exception:
+            main.log.exception( "" )
+            return main.FALSE
+        return main.TRUE
+
+    def parse(self,fileName):
+        '''
+        This will parse the params or topo or cfg file and return content in the file as Dictionary
+        '''
+        self.fileName = fileName
+        matchFileName = re.match(r'(.*)\.(cfg|params|topo)',self.fileName,re.M|re.I)
+        if matchFileName:
+            try :
+                parsedInfo = ConfigObj(self.fileName)
+                return parsedInfo
+            except StandardError:
+                print "There is no such file to parse "+fileName
+        else:
+            return 0
+
+    def retry( self, f, retValue, args=(), kwargs={},
+               sleep=1, attempts=2, randomTime=False ):
+        """
+        Given a function and bad return values, retry will retry a function
+        until successful or give up after a certain number of attempts.
+
+        Arguments:
+        f        - a callable object
+        retValue - Return value(s) of f to retry on. This can be a list or an
+                   object.
+        args     - A tuple containing the arguments of f.
+        kwargs   - A dictionary containing the keyword arguments of f.
+        sleep    - Time in seconds to sleep between retries. If random is True,
+                   this is the max time to wait. Defaults to 1 second.
+        attempts - Max number of attempts before returning. If set to 1,
+                   f will only be called once. Defaults to 2 tries.
+        randomTime - Boolean indicating if the wait time is random between 0
+                     and sleep or exactly sleep seconds. Defaults to False.
+        """
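+        # Illustrative usage (hypothetical callable and arguments):
+        #   result = utilities.retry( driver.someCall, main.FALSE,
+        #                             args=( "arg1", ), sleep=5, attempts=3 )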
+        # TODO: be able to pass in a conditional statement(s). For example:
+        #      retCondition = "< 7"
+        #      Then we do something like 'if eval( "ret " + retCondition ):break'
+        try:
+            assert attempts > 0, "attempts must be at least 1"
+            assert sleep >= 0, "sleep must be >= 0"
+            if not isinstance( retValue, list ):
+                retValue = [ retValue ]
+            for i in range( 0, attempts ):
+                ret = f( *args, **kwargs )
+                if ret not in retValue:
+                    # NOTE that False in [ 0 ] == True
+                    break
+                if randomTime:
+                    sleeptime = random.randint( 0, sleep )
+                else:
+                    sleeptime = sleep
+                time.sleep( sleeptime )
+            return ret
+        except AssertionError:
+            main.log.exception( "Invalid arguments for retry: " )
+            main.cleanup()
+            main.exit()
+        except Exception:
+            main.log.exception( "Uncaught exception in retry: " )
+            main.cleanup()
+            main.exit()
+
+utilities = Utilities()
+
+if __name__ != "__main__":
+    import sys
+
+    sys.modules[__name__] = Utilities()
diff --git a/src/test/dhcp/__init__.py b/src/test/dhcp/__init__.py
new file mode 100644
index 0000000..a881eb6
--- /dev/null
+++ b/src/test/dhcp/__init__.py
@@ -0,0 +1,7 @@
+import os,sys
+##add the python path to lookup the utils
+working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
+utils_dir = os.path.join(working_dir, '../utils')
+fsm_dir = os.path.join(working_dir, '../fsm')
+__path__.append(utils_dir)
+__path__.append(fsm_dir)
diff --git a/src/test/dhcp/dhcpTest.py b/src/test/dhcp/dhcpTest.py
index 5be99b7..1b4f37d 100644
--- a/src/test/dhcp/dhcpTest.py
+++ b/src/test/dhcp/dhcpTest.py
@@ -4,14 +4,9 @@
 from twisted.internet import defer
 from scapy.all import *
 import time
-import os, sys
 import copy
-CORD_TEST_UTILS = 'utils'
-test_root = os.getenv('CORD_TEST_ROOT') or './'
-sys.path.append(test_root + CORD_TEST_UTILS)
 from DHCP import DHCPTest
 from OnosCtrl import OnosCtrl
-
 log.setLevel('INFO')
 
 class dhcp_exchange(unittest.TestCase):
@@ -33,6 +28,7 @@
 
     def setUp(self):
         ''' Activate the dhcp app'''
+        self.maxDiff = None  ## show full diffs when assert_equal fails
         self.onos_ctrl = OnosCtrl(self.app)
         status, _ = self.onos_ctrl.activate()
         assert_equal(status, True)
@@ -43,11 +39,11 @@
         self.onos_ctrl.deactivate()
 
     def onos_load_config(self, config):
-        status, code = self.onos_ctrl.config(config)
+        status, code = OnosCtrl.config(config)
         if status is False:
             log.info('JSON request returned status %d' %code)
             assert_equal(status, True)
-        time.sleep(2)
+        time.sleep(3)
 
     def onos_dhcp_table_load(self, config = None):
           dhcp_dict = {'apps' : { 'org.onosproject.dhcp' : { 'dhcp' : copy.copy(self.dhcp_server_config) } } }
@@ -58,12 +54,13 @@
                       dhcp_config[k] = config[k]
           self.onos_load_config(dhcp_dict)
 
-    def send_recv(self, update_seed = False):
-        cip, sip = self.dhcp.send(update_seed = update_seed)
-        assert_not_equal(cip, None)
-        assert_not_equal(sip, None)
-        log.info('Got dhcp client IP %s from server %s for mac %s' %
-                 (cip, sip, self.dhcp.get_mac(cip)[0]))
+    def send_recv(self, mac = None, update_seed = False, validate = True):
+        cip, sip = self.dhcp.discover(mac = mac, update_seed = update_seed)
+        if validate:
+            assert_not_equal(cip, None)
+            assert_not_equal(sip, None)
+            log.info('Got dhcp client IP %s from server %s for mac %s' %
+                     (cip, sip, self.dhcp.get_mac(cip)[0]))
         return cip,sip
 
     def test_dhcp_1request(self, iface = 'veth0'):
@@ -87,3 +84,313 @@
                 log.info('IP %s given out multiple times' %cip)
                 assert_equal(False, ip_map.has_key(cip))
             ip_map[cip] = sip
+
+    def test_dhcp_1release(self, iface = 'veth0'):
+        config = {'startip':'10.10.100.20', 'endip':'10.10.100.21', 
+                  'ip':'10.10.100.2', 'mac': "ca:fe:ca:fe:8a:fe",
+                  'subnet': '255.255.255.0', 'broadcast':'10.10.100.255', 'router':'10.10.100.1'}
+        self.onos_dhcp_table_load(config)
+        self.dhcp = DHCPTest(seed_ip = '10.10.100.10', iface = iface)
+        cip, sip = self.send_recv()
+        log.info('Releasing ip %s to server %s' %(cip, sip))
+        assert_equal(self.dhcp.release(cip), True)
+        log.info('Triggering DHCP discover again after release')
+        cip2, sip2 = self.send_recv(update_seed = True)
+        log.info('Verifying released IP was given back on rediscover')
+        assert_equal(cip, cip2)
+        log.info('Test done. Releasing ip %s to server %s' %(cip2, sip2))
+        assert_equal(self.dhcp.release(cip2), True)
+
+    def test_dhcp_Nrelease(self, iface = 'veth0'):
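+        ## Allocate several leases, release them all, then rediscover and check that the same IP-to-server map is handed back.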
+        config = {'startip':'192.170.1.20', 'endip':'192.170.1.30', 
+                  'ip':'192.170.1.2', 'mac': "ca:fe:ca:fe:9a:fe",
+                  'subnet': '255.255.255.0', 'broadcast':'192.170.1.255', 'router': '192.170.1.1'}
+        self.onos_dhcp_table_load(config)
+        self.dhcp = DHCPTest(seed_ip = '192.170.1.10', iface = iface)
+        ip_map = {}
+        for i in range(10):
+            cip, sip = self.send_recv(update_seed = True)
+            if ip_map.has_key(cip):
+                log.info('IP %s given out multiple times' %cip)
+                assert_equal(False, ip_map.has_key(cip))
+            ip_map[cip] = sip
+
+        for ip in ip_map.keys():
+            log.info('Releasing IP %s' %ip)
+            assert_equal(self.dhcp.release(ip), True)
+
+        ip_map2 = {}
+        log.info('Triggering DHCP discover again after release')
+        for i in range(len(ip_map.keys())):
+            cip, sip = self.send_recv(update_seed = True)
+            ip_map2[cip] = sip
+
+        log.info('Verifying released IPs were given back on rediscover')
+        if ip_map != ip_map2:
+            log.info('Map before release %s' %ip_map)
+            log.info('Map after release %s' %ip_map2)
+        assert_equal(ip_map, ip_map2)
+
+
+    def test_dhcp_unique_ip_allocation(self, iface = 'veth0'):
+        config = {'startip':'193.170.1.20', 'endip':'193.170.1.69', 
+                  'ip':'193.170.1.2', 'mac': "ca:fe:c2:fe:cc:fe",
+                  'subnet': '255.255.255.0', 'broadcast':'192.168.1.255', 'router': '192.168.1.1'}
+        self.onos_dhcp_table_load(config)
+        self.dhcp = DHCPTest(seed_ip = '192.169.1.1', iface = iface)
+        ip_map = {}
+        for i in range(10):
+            cip, sip = self.send_recv(update_seed = True)
+            if ip_map.has_key(cip):
+                log.info('IP %s given out multiple times' %cip)
+                assert_equal(False, ip_map.has_key(cip))
+            ip_map[cip] = sip
+
+
+    def test_dhcp_starvation(self, iface = 'veth0'):
+        '''DHCP starve'''
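+        ## Exhaust the 50-address pool using random client MACs, then check that a further discover yields no offer.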
+        config = {'startip':'182.17.0.20', 'endip':'182.17.0.69', 
+                  'ip':'182.17.0.2', 'mac': "ca:fe:c3:fe:ca:fe",
+                  'subnet': '255.255.255.0', 'broadcast':'182.17.0.255', 'router':'182.17.0.1'}
+        self.onos_dhcp_table_load(config)
+        self.dhcp = DHCPTest(seed_ip = '182.17.0.1', iface = iface)
+        log.info('Exhausting the DHCP address pool with discovers from random client MACs')
+        for x in xrange(50):
+            mac = RandMAC()._fix()
+            self.send_recv(mac = mac)
+        log.info('Verifying that no offer is made once the pool is exhausted')
+        cip, sip = self.send_recv(update_seed = True, validate = False)
+        assert_equal(cip, None)
+        assert_equal(sip, None)
+
+
+    def test_dhcp_same_client_multiple_discover(self, iface = 'veth0'):
+	''' DHCP Client sending multiple discover . '''
+	config = {'startip':'10.10.10.20', 'endip':'10.10.10.69', 
+                 'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
+                 'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
+        self.onos_dhcp_table_load(config)
+        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
+	cip, sip, mac = self.dhcp.only_discover()
+	log.info('Got dhcp client IP %s from server %s for mac %s . Not going to send DHCPREQUEST.' %  
+		  (cip, sip, mac) )
+	log.info('Triggering DHCP discover again.')
+	new_cip, new_sip, new_mac = self.dhcp.only_discover()
+	if cip == new_cip:
+		log.info('Got same ip for 2nd DHCP discover for client IP %s from server %s for mac %s. Triggering DHCP Request. '
+			  % (new_cip, new_sip, new_mac) )
+	elif cip != new_cip:
+		log.info('Ip after 1st discover %s' %cip)
+                log.info('Ip after 2nd discover %s' %new_cip)
+		assert_equal(cip, new_cip)
+
+
+    def test_dhcp_same_client_multiple_request(self, iface = 'veth0'):
+	''' DHCP Client sending multiple repeat DHCP requests. '''
+	config = {'startip':'10.10.10.20', 'endip':'10.10.10.69', 
+                 'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
+                 'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
+        self.onos_dhcp_table_load(config)
+        self.dhcp = DHCPTest(seed_ip = '10.10.10.1', iface = iface)
+	log.info('Sending DHCP discover and DHCP request.')
+	cip, sip = self.send_recv()
+	mac = self.dhcp.get_mac(cip)[0]
+	log.info("Sending DHCP request again.")
+	new_cip, new_sip = self.dhcp.only_request(cip, mac)
+	if (new_cip,new_sip) == (cip,sip):
+		
+		log.info('Got same ip for 2nd DHCP Request for client IP %s from server %s for mac %s.'
+			  % (new_cip, new_sip, mac) )
+	elif new_cip == None:
+		
+		log.info('No DHCP ACK')
+		assert_equal(new_cip, None)
+		assert_equal(new_sip, None)
+	else:
+		print "Something went wrong."	
+    
+    def test_dhcp_client_desired_address(self, iface = 'veth0'):
+	'''DHCP Client asking for desired IP address.'''
+	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69', 
+                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
+                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
+        self.onos_dhcp_table_load(config)
+        self.dhcp = DHCPTest(seed_ip = '20.20.20.31', iface = iface)
+	cip, sip, mac = self.dhcp.only_discover(desired = True)
+	log.info('Got dhcp client IP %s from server %s for mac %s .' %  
+		  (cip, sip, mac) )
+	if cip == self.dhcp.seed_ip:
+		log.info('Got dhcp client IP %s from server %s for mac %s as desired .' %  
+		  (cip, sip, mac) )
+	elif cip != self.dhcp.seed_ip:
+		log.info('Got dhcp client IP %s from server %s for mac %s .' %  
+		  (cip, sip, mac) )
+		log.info('The desired ip was: %s .' % self.dhcp.seed_ip)
+		assert_equal(cip, self.dhcp.seed_ip)
+		
+		
+    def test_dhcp_client_desired_address_out_of_pool(self, iface = 'veth0'):
+	'''DHCP Client asking for desired IP address from out of pool.'''
+	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69', 
+                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
+                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
+        self.onos_dhcp_table_load(config)
+        self.dhcp = DHCPTest(seed_ip = '20.20.20.35', iface = iface)
+	cip, sip, mac = self.dhcp.only_discover(desired = True)
+	log.info('Got dhcp client IP %s from server %s for mac %s .' %  
+		  (cip, sip, mac) )
+	if cip == self.dhcp.seed_ip:
+		log.info('Got dhcp client IP %s from server %s for mac %s as desired .' %  
+		  (cip, sip, mac) )
+		assert_equal(cip, self.dhcp.seed_ip) #Negative Test Case
+
+	elif cip != self.dhcp.seed_ip:
+		log.info('Got dhcp client IP %s from server %s for mac %s .' %  
+		  (cip, sip, mac) )
+		log.info('The desired ip was: %s .' % self.dhcp.seed_ip)
+		assert_not_equal(cip, self.dhcp.seed_ip)
+
+	elif cip == None:
+		log.info('Got DHCP NAK')
+	
+			
+    def test_dhcp_server_nak_packet(self, iface = 'veth0'):
+	''' Client sends DHCP Request for ip that is different from DHCP offer packet.'''
+	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69', 
+                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
+                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
+        self.onos_dhcp_table_load(config)
+        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
+	cip, sip, mac = self.dhcp.only_discover()
+	log.info('Got dhcp client IP %s from server %s for mac %s .' %  
+		  (cip, sip, mac) )
+	
+	log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
+	if (cip == None and mac != None):
+		log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
+		assert_not_equal(cip, None)
+	else:
+		new_cip, new_sip = self.dhcp.only_request('20.20.20.31', mac)
+		if new_cip == None:
+			
+			log.info("Got DHCP server NAK.")
+			assert_equal(new_cip, None)  #Negative Test Case
+		
+	
+    def test_dhcp_lease_packet(self, iface = 'veth0'):
+	''' Client sends DHCP Discover packet for particular lease time.'''
+	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69', 
+                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
+                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
+        self.onos_dhcp_table_load(config)
+        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
+	log.info('Sending DHCP discover with lease time of 700')
+	cip, sip, mac, lval = self.dhcp.only_discover(lease_time = True)
+
+	log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
+	if (cip == None and mac != None):
+		log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
+		assert_not_equal(cip, None)
+	elif lval != 700:
+		log.info('Got dhcp client IP %s from server %s for mac %s with lease time %s, which is not the requested 700.' %
+			 (cip, sip, mac, lval) )
+		assert_equal(lval, 700)
+
+    def test_dhcp_client_request_after_reboot(self, iface = 'veth0'):
+	''' Client sends DHCP Request after reboot.'''
+	
+	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69', 
+                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
+                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
+        self.onos_dhcp_table_load(config)
+        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
+	cip, sip, mac = self.dhcp.only_discover()
+	log.info('Got dhcp client IP %s from server %s for mac %s .' %  
+		  (cip, sip, mac) )
+	
+	log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
+	
+	if (cip == None and mac != None):
+		log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
+		assert_not_equal(cip, None)
+		
+	else:
+		new_cip, new_sip = self.dhcp.only_request(cip, mac)
+		if new_cip == None:
+			log.info("Got DHCP server NAK.")
+		os.system('ifconfig '+iface+' down')
+		log.info('Client goes down.')
+		log.info('Delay for 5 seconds.')
+		
+		time.sleep(5)
+		
+		os.system('ifconfig '+iface+' up')
+		log.info('Client is up now.')
+		
+		new_cip, new_sip = self.dhcp.only_request(cip, mac)
+		if new_cip == None:
+			log.info("Got DHCP server NAK.")
+			assert_not_equal(new_cip, None)
+		elif new_cip != None:
+			log.info("Got DHCP ACK.")
+    
+	
+     
+    def test_dhcp_server_after_reboot(self, iface = 'veth0'):
+	''' DHCP server goes down.'''
+	config = {'startip':'20.20.20.30', 'endip':'20.20.20.69', 
+                 'ip':'20.20.20.2', 'mac': "ca:fe:ca:fe:ca:fe",
+                 'subnet': '255.255.255.0', 'broadcast':'20.20.20.255', 'router':'20.20.20.1'}
+        self.onos_dhcp_table_load(config)
+        self.dhcp = DHCPTest(seed_ip = '20.20.20.45', iface = iface)
+	cip, sip, mac = self.dhcp.only_discover()
+	log.info('Got dhcp client IP %s from server %s for mac %s .' %  
+		  (cip, sip, mac) )
+	
+	log.info("Verifying Client 's IP and mac in DHCP Offer packet. Those should not be none, which is expected.")
+	
+	if (cip == None and mac != None):
+		log.info("Verified that Client 's IP and mac in DHCP Offer packet are none, which is not expected behavior.")
+		assert_not_equal(cip, None)
+		
+	else:
+		new_cip, new_sip = self.dhcp.only_request(cip, mac)
+		if new_cip == None:
+			log.info("Got DHCP server NAK.")
+			assert_not_equal(new_cip, None)
+		log.info('Bringing the DHCP server down.')
+        	
+		self.onos_ctrl.deactivate()
+			
+		for i in range(0,4):
+			log.info("Sending DHCP Request.")
+			log.info('')
+			new_cip, new_sip = self.dhcp.only_request(cip, mac)
+			if new_cip == None and new_sip == None:
+				log.info('')
+				log.info("DHCP Request timed out.")
+			elif new_cip and new_sip:
+				log.info("Got Reply from DHCP server.")
+				assert_equal(new_cip,None) #Negative Test Case
+		
+		log.info('Bringing the DHCP server back up.')
+        
+		status, _ = self.onos_ctrl.activate()
+        	assert_equal(status, True)
+        	time.sleep(3)
+		
+		for i in range(0,4):
+			log.info("Sending DHCP Request after DHCP server is up.")
+			log.info('')
+			new_cip, new_sip = self.dhcp.only_request(cip, mac)
+			if new_cip == None and new_sip == None:
+				log.info('')
+				log.info("DHCP Request timed out.")
+			elif new_cip and new_sip:
+				log.info("Got Reply from DHCP server.")
+				assert_equal(new_cip,None) #Negative Test Case
+		
+
+	
+ 
+
diff --git a/src/test/flows/__init__.py b/src/test/flows/__init__.py
new file mode 100644
index 0000000..a881eb6
--- /dev/null
+++ b/src/test/flows/__init__.py
@@ -0,0 +1,7 @@
+import os,sys
+##add the python path to lookup the utils
+working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
+utils_dir = os.path.join(working_dir, '../utils')
+fsm_dir = os.path.join(working_dir, '../fsm')
+__path__.append(utils_dir)
+__path__.append(fsm_dir)
diff --git a/src/test/flows/flowsTest.py b/src/test/flows/flowsTest.py
new file mode 100644
index 0000000..88a7d0b
--- /dev/null
+++ b/src/test/flows/flowsTest.py
@@ -0,0 +1,102 @@
+import unittest
+from nose.tools import *
+from nose.twistedtools import reactor, deferred
+from twisted.internet import defer
+from scapy.all import *
+import time
+import json
+import threading
+from OnosCtrl import OnosCtrl
+from OnosFlowCtrl import OnosFlowCtrl, get_mac
+from OltConfig import OltConfig
+log.setLevel('INFO')
+
+class flows_exchange(unittest.TestCase):
+
+    #Use the first available device id as our device id to program flows
+    app = 'org.onosproject.cli'
+    PORT_TX_DEFAULT = 2
+    PORT_RX_DEFAULT = 1
+    INTF_TX_DEFAULT = 'veth2'
+    INTF_RX_DEFAULT = 'veth0'
+    default_port_map = { 
+        PORT_TX_DEFAULT : INTF_TX_DEFAULT,
+        PORT_RX_DEFAULT : INTF_RX_DEFAULT,
+        INTF_TX_DEFAULT : PORT_TX_DEFAULT,
+        INTF_RX_DEFAULT : PORT_RX_DEFAULT
+        }
+
+    @classmethod
+    def setUpClass(cls):
+        cls.olt = OltConfig()
+        cls.port_map = cls.olt.olt_port_map()
+        if not cls.port_map:
+            cls.port_map = cls.default_port_map
+        cls.device_id = 'of:' + get_mac() ##match against our device id
+
+    def test_flow_mac(self):
+        '''Add and verify flows with MAC selectors'''
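+        ## Program a MAC-match flow from the ingress to the egress port, then send crafted packets on the ingress interface and sniff the egress interface to confirm forwarding.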
+        egress = 1
+        ingress = 2
+        egress_mac = '00:00:00:00:00:01'
+        ingress_mac = '00:00:00:00:00:02'
+        flow = OnosFlowCtrl(deviceId = self.device_id,
+                            egressPort = egress,
+                            ingressPort = ingress,
+                            ethSrc = ingress_mac,
+                            ethDst = egress_mac)
+        result = flow.addFlow()
+        assert_equal(result, True)
+        ##wait for flows to be added to ONOS
+        time.sleep(3)
+        self.success = False
+        def mac_recv_task():
+            def recv_cb(pkt):
+                log.info('Pkt seen with ingress mac %s, egress mac %s' %(pkt.src, pkt.dst))
+                self.success = True
+            sniff(count=2, timeout=5, lfilter = lambda p: p.src == ingress_mac, 
+                  prn = recv_cb, iface = self.port_map[egress])
+
+        t = threading.Thread(target = mac_recv_task)
+        t.start()
+        pkt = Ether(src = ingress_mac, dst = egress_mac)/IP()
+        log.info('Sending a packet to verify if flows are correct')
+        sendp(pkt, count=50, iface = self.port_map[ingress])
+        t.join()
+        assert_equal(self.success, True)
+
+    def test_flow_ip(self):
+        '''Add and verify flows with IPv4 selectors'''
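+        ## Program an IPv4 src/dst match flow, then confirm forwarding by sniffing the egress interface for the crafted packet.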
+        egress = 1
+        ingress = 2
+        egress_map = { 'ether': '00:00:00:00:00:03', 'ip': '192.168.30.1' }
+        ingress_map = { 'ether': '00:00:00:00:00:04', 'ip': '192.168.40.1' }
+        flow = OnosFlowCtrl(deviceId = self.device_id,
+                            egressPort = egress,
+                            ingressPort = ingress,
+                            ethType = '0x0800',
+                            ipSrc = ('IPV4_SRC', ingress_map['ip']+'/32'),
+                            ipDst = ('IPV4_DST', egress_map['ip']+'/32')
+                            )
+        result = flow.addFlow()
+        assert_equal(result, True)
+        ##wait for flows to be added to ONOS
+        time.sleep(3)
+        self.success = False
+        def mac_recv_task():
+            def recv_cb(pkt):
+                log.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
+                self.success = True
+            sniff(count=2, timeout=5, 
+                  lfilter = lambda p: IP in p and p[IP].dst == egress_map['ip'] and p[IP].src == ingress_map['ip'],
+                  prn = recv_cb, iface = self.port_map[egress])
+
+        t = threading.Thread(target = mac_recv_task)
+        t.start()
+        L2 = Ether(src = ingress_map['ether'], dst = egress_map['ether'])
+        L3 = IP(src = ingress_map['ip'], dst = egress_map['ip'])
+        pkt = L2/L3
+        log.info('Sending a packet to verify if flows are correct')
+        sendp(pkt, count=50, iface = self.port_map[ingress])
+        t.join()
+        assert_equal(self.success, True)
diff --git a/src/test/fsm/noseMd5AuthHolder.py b/src/test/fsm/noseMd5AuthHolder.py
new file mode 100644
index 0000000..908c25c
--- /dev/null
+++ b/src/test/fsm/noseMd5AuthHolder.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
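+## FSM table for the EAP-MD5 authentication holder: each entry maps a (current state, event) pair to the actions to run and the next state.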
+def initMd5AuthHolderFsmTable(obj,St,Ev):
+    return {
+
+    ## CurrentState                          Event                                      Actions                   NextState
+
+      (St.ST_EAP_SETUP,                      Ev.EVT_EAP_SETUP                       ):( (obj._eapSetup,),         St.ST_EAP_START),
+
+    ## CurrentState                          Event                                      Actions                   NextState
+
+      (St.ST_EAP_MD5_CHALLENGE,              Ev.EVT_EAP_MD5_CHALLENGE               ):( (obj._eapMd5Challenge,),  St.ST_EAP_STATUS),
+
+    ## CurrentState                          Event                                      Actions                   NextState
+
+      (St.ST_EAP_STATUS,                     Ev.EVT_EAP_STATUS                      ):( (obj._eapStatus,),        St.ST_EAP_MD5_DONE),
+
+    ## CurrentState                          Event                                      Actions                   NextState
+
+      (St.ST_EAP_ID_REQ,                     Ev.EVT_EAP_ID_REQ                      ):( (obj._eapIdReq,),         St.ST_EAP_MD5_CHALLENGE),
+
+    ## CurrentState                          Event                                      Actions                   NextState
+
+      (St.ST_EAP_START,                      Ev.EVT_EAP_START                       ):( (obj._eapStart,),         St.ST_EAP_ID_REQ),
+
+}
+
diff --git a/src/test/fsm/noseTlsAuthHolder.py b/src/test/fsm/noseTlsAuthHolder.py
index 127b10b..4e2fb35 100644
--- a/src/test/fsm/noseTlsAuthHolder.py
+++ b/src/test/fsm/noseTlsAuthHolder.py
@@ -3,25 +3,33 @@
 def initTlsAuthHolderFsmTable(obj,St,Ev):
     return {
 
-    ## CurrentState                          Event                                      Actions                  NextState
+    ## CurrentState                                   Event                                               Actions                          NextState
 
-      (St.ST_EAP_SETUP,                      Ev.EVT_EAP_SETUP                       ):( (obj._eapSetup,),        St.ST_EAP_START),
+      (St.ST_EAP_TLS_HELLO_REQ,                       Ev.EVT_EAP_TLS_HELLO_REQ                        ):( (obj._eapTlsHelloReq,),          St.ST_EAP_TLS_CERT_REQ),
 
-    ## CurrentState                          Event                                      Actions                  NextState
+    ## CurrentState                                   Event                                               Actions                          NextState
 
-      (St.ST_EAP_TLS_HELLO_REQ,              Ev.EVT_EAP_TLS_HELLO_REQ               ):( (obj._eapTlsHelloReq,),  St.ST_EAP_TLS_CERT_REQ),
+      (St.ST_EAP_ID_REQ,                              Ev.EVT_EAP_ID_REQ                               ):( (obj._eapIdReq,),                St.ST_EAP_TLS_HELLO_REQ),
 
-    ## CurrentState                          Event                                      Actions                  NextState
+    ## CurrentState                                   Event                                               Actions                          NextState
 
-      (St.ST_EAP_START,                      Ev.EVT_EAP_START                       ):( (obj._eapStart,),        St.ST_EAP_ID_REQ),
+      (St.ST_EAP_SETUP,                               Ev.EVT_EAP_SETUP                                ):( (obj._eapSetup,),                St.ST_EAP_START),
 
-    ## CurrentState                          Event                                      Actions                  NextState
+    ## CurrentState                                   Event                                               Actions                          NextState
 
-      (St.ST_EAP_ID_REQ,                     Ev.EVT_EAP_ID_REQ                      ):( (obj._eapIdReq,),        St.ST_EAP_TLS_HELLO_REQ),
+      (St.ST_EAP_TLS_FINISHED,                        Ev.EVT_EAP_TLS_FINISHED                         ):( (obj._eapTlsFinished,),          St.ST_EAP_TLS_DONE),
 
-    ## CurrentState                          Event                                      Actions                  NextState
+    ## CurrentState                                   Event                                               Actions                          NextState
 
-      (St.ST_EAP_TLS_CERT_REQ,               Ev.EVT_EAP_TLS_CERT_REQ                ):( (obj._eapTlsCertReq,),   St.ST_EAP_TLS_DONE),
+      (St.ST_EAP_START,                               Ev.EVT_EAP_START                                ):( (obj._eapStart,),                St.ST_EAP_ID_REQ),
+
+    ## CurrentState                                   Event                                               Actions                          NextState
+
+      (St.ST_EAP_TLS_CHANGE_CIPHER_SPEC,              Ev.EVT_EAP_TLS_CHANGE_CIPHER_SPEC               ):( (obj._eapTlsChangeCipherSpec,),  St.ST_EAP_TLS_FINISHED),
+
+    ## CurrentState                                   Event                                               Actions                          NextState
+
+      (St.ST_EAP_TLS_CERT_REQ,                        Ev.EVT_EAP_TLS_CERT_REQ                         ):( (obj._eapTlsCertReq,),           St.ST_EAP_TLS_CHANGE_CIPHER_SPEC),
 
 }
 
diff --git a/src/test/igmp/__init__.py b/src/test/igmp/__init__.py
new file mode 100644
index 0000000..900be31
--- /dev/null
+++ b/src/test/igmp/__init__.py
@@ -0,0 +1,5 @@
+import os,sys
+##add the python path to lookup the utils
+working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
+utils_dir = os.path.join(working_dir, '../utils')
+__path__.append(utils_dir)
diff --git a/src/test/igmp/igmpTest.py b/src/test/igmp/igmpTest.py
index bc3463b..b6ad1a2 100644
--- a/src/test/igmp/igmpTest.py
+++ b/src/test/igmp/igmpTest.py
@@ -8,24 +8,14 @@
 import tempfile
 import random
 import threading
-
-CORD_TEST_UTILS = 'utils'
-test_root = os.getenv('CORD_TEST_ROOT') or './'
-sys.path.append(test_root + CORD_TEST_UTILS)
 from IGMP import *
 from McastTraffic import *
 from Stats import Stats
 from OnosCtrl import OnosCtrl
+from OltConfig import OltConfig
+from Channels import IgmpChannel
 log.setLevel('INFO')
 
-IGMP_DST_MAC = "01:00:5e:00:01:01"
-IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
-IP_SRC = '1.2.3.4'
-IP_DST = '224.0.1.1'
-
-igmp_eth = Ether(dst = IGMP_DST_MAC, src = IGMP_SRC_MAC, type = ETH_P_IP)
-igmp_ip = IP(dst = IP_DST, src = IP_SRC)
-
 class IGMPTestState:
 
       def __init__(self, groups = [], df = None, state = 0):
@@ -49,24 +39,52 @@
 
 class igmp_exchange(unittest.TestCase):
 
+    V_INF1 = 'veth0'
+    V_INF2 = 'veth1'
+    MGROUP1 = '239.1.2.3'
+    MGROUP2 = '239.2.2.3'
+    MINVALIDGROUP1 = '255.255.255.255'
+    MINVALIDGROUP2 = '239.255.255.255'
+    MMACGROUP1 = "01:00:5e:01:02:03"
+    MMACGROUP2 = "01:00:5e:02:02:03"
+    IGMP_DST_MAC = "01:00:5e:00:01:01"
+    IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
+    IP_SRC = '1.2.3.4'
+    IP_DST = '224.0.1.1'
+    NEGATIVE_TRAFFIC_STATUS = 1
+    igmp_eth = Ether(dst = IGMP_DST_MAC, src = IGMP_SRC_MAC, type = ETH_P_IP)
+    igmp_ip = IP(dst = IP_DST, src = IP_SRC)
     IGMP_TEST_TIMEOUT = 5
+    IGMP_QUERY_TIMEOUT = 60
     MCAST_TRAFFIC_TIMEOUT = 10
+    PORT_TX_DEFAULT = 2
+    PORT_RX_DEFAULT = 1
     max_packets = 100
     app = 'org.onosproject.igmp'
+    olt_conf_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../setup/olt_config.json')
 
+    @classmethod
+    def setUpClass(cls):
+          cls.olt = OltConfig(olt_conf_file = cls.olt_conf_file)
+          OnosCtrl.cord_olt_config(cls.olt.olt_device_data())
+
+    @classmethod
+    def tearDownClass(cls): pass
+          
     def setUp(self):
         ''' Activate the dhcp app'''
         self.onos_ctrl = OnosCtrl(self.app)
         status, _ = self.onos_ctrl.activate()
         assert_equal(status, True)
-        time.sleep(3)
+        time.sleep(2)
+        self.igmp_channel = IgmpChannel()
 
     def teardown(self):
         '''Deactivate the dhcp app'''
         self.onos_ctrl.deactivate()
 
     def onos_load_config(self, config):
-        status, code = self.onos_ctrl.config(config)
+        status, code = OnosCtrl.config(config)
         if status is False:
             log.info('JSON request returned status %d' %code)
             assert_equal(status, True)
@@ -78,10 +96,14 @@
           for g in groups:
                 for s in src_list:
                       d = {}
-                      d['source'] = s
+                      d['source'] = s or '0.0.0.0'
                       d['group'] = g
                       ssm_xlate_list.append(d)
           self.onos_load_config(ssm_dict)
+          cord_port_map = {}
+          for g in groups:
+                cord_port_map[g] = (self.PORT_TX_DEFAULT, self.PORT_RX_DEFAULT)
+          self.igmp_channel.cord_port_table_load(cord_port_map)
           time.sleep(2)
 
     def igmp_verify_join(self, igmpStateList):
@@ -126,41 +148,90 @@
     ##Runs in the context of twisted reactor thread
     def igmp_recv(self, igmpState, iface = 'veth0'):
         p = self.recv_socket.recv()
-        send_time = float(p.payload.load)
-        recv_time = monotonic.monotonic()
+        try:
+              send_time = float(p.payload.load)
+              recv_time = monotonic.monotonic()
+        except:
+              log.info('Unexpected Payload received: %s' %p.payload.load)
+              return 0
         #log.info( 'Recv in %.6f secs' %(recv_time - send_time))
         igmpState.update(p.dst, rx = 1, t = recv_time - send_time)
         return 0
 
-    def send_igmp_join(self, groups, src_list = ['1.2.3.4'], iface = 'veth0', delay = 2):
+    def send_igmp_join(self, groups, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
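+        ## Load the SSM translate table for the groups, then send an IGMPv3 membership report with EXCLUDE group records; ip_pkt overrides the default Ether/IP header.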
         self.onos_ssm_table_load(groups, src_list)
         igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr='224.0.1.1')
+                      gaddr=self.IP_DST)
         for g in groups:
               gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=g)
               gr.sources = src_list
               igmp.grps.append(gr)
-
-        pkt = igmp_eth/igmp_ip/igmp
+        if ip_pkt is None:
+              ip_pkt = self.igmp_eth/self.igmp_ip
+        pkt = ip_pkt/igmp
         IGMPv3.fixup(pkt)
         sendp(pkt, iface=iface)
         if delay != 0:
             time.sleep(delay)
 
-    def send_igmp_leave(self, groups, src_list = ['1.2.3.4'], iface = 'veth0', delay = 2):
+    def send_igmp_join_recvQuery(self, groups, rec_queryCount = None, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
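+        ## Send an IGMPv3 join and wait for an IGMP query in response; a single query is awaited when rec_queryCount is None, periodic queries otherwise.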
+        self.onos_ssm_table_load(groups, src_list)
         igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                      gaddr='224.0.1.1')
+                      gaddr=self.IP_DST)
+        for g in groups:
+              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=g)
+              gr.sources = src_list
+              igmp.grps.append(gr)
+        if ip_pkt is None:
+              ip_pkt = self.igmp_eth/self.igmp_ip
+        pkt = ip_pkt/igmp
+        IGMPv3.fixup(pkt)
+        if rec_queryCount == None:
+            log.info('Sending IGMP join for group %s and waiting for a single IGMP query packet' %groups)
+            resp = srp1(pkt, iface=iface)
+        else:
+            log.info('Sending IGMP join for group %s and waiting for periodic IGMP query packets' %groups)
+            resp = srp3(pkt, iface=iface)
+#       resp = srp1(pkt, iface=iface) if rec_queryCount else srp3(pkt, iface=iface)
+        resp[0].summary()
+        log.info('Sent IGMP join for group %s and received an IGMP query packet' %groups)
+        if delay != 0:
+            time.sleep(delay)
+
+    def send_igmp_leave(self, groups, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
+        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
+                      gaddr=self.IP_DST)
         for g in groups:
               gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_INCLUDE, mcaddr=g)
               gr.sources = src_list
               igmp.grps.append(gr)
-
-        pkt = igmp_eth/igmp_ip/igmp
+        if ip_pkt is None:
+              ip_pkt = self.igmp_eth/self.igmp_ip
+        pkt = ip_pkt/igmp
         IGMPv3.fixup(pkt)
         sendp(pkt, iface = iface)
         if delay != 0:
             time.sleep(delay)
 
+    def send_igmp_leave_listening_group_specific_query(self, groups, src_list = ['1.2.3.4'], ip_pkt = None, iface = 'veth0', delay = 2):
+        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
+                      gaddr=self.IP_DST)
+        for g in groups:
+              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_INCLUDE, mcaddr=g)
+              gr.sources = src_list
+              igmp.grps.append(gr)
+        if ip_pkt is None:
+              ip_pkt = self.igmp_eth/self.igmp_ip
+        pkt = ip_pkt/igmp
+        IGMPv3.fixup(pkt)
+        log.info('Sending IGMP leave for group %s and waiting for a group-specific query packet' %groups)
+        resp = srp1(pkt, iface=iface)
+        resp[0].summary()
+        log.info('Sent IGMP leave for group %s and received a group-specific query packet' %groups)
+        if delay != 0:
+            time.sleep(delay)
+
     @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
     def test_igmp_join_verify_traffic(self):
         groups = ['224.0.1.1', '225.0.0.1']
@@ -257,7 +328,7 @@
     def igmp_join_task(self, intf, groups, state, src_list = ['1.2.3.4']):
           self.onos_ssm_table_load(groups, src_list)
           igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
-                        gaddr='224.0.1.1')
+                        gaddr=self.IP_DST)
           for g in groups:
                 gr = IGMPv3gr(rtype = IGMP_V3_GR_TYPE_EXCLUDE, mcaddr = g)
                 gr.sources = src_list
@@ -266,7 +337,7 @@
           for g in groups:
                 state.group_map[g][0].update(1, t = monotonic.monotonic())
 
-          pkt = igmp_eth/igmp_ip/igmp
+          pkt = self.igmp_eth/self.igmp_ip/igmp
           IGMPv3.fixup(pkt)
           sendp(pkt, iface=intf)
           log.debug('Returning from join task')
@@ -277,6 +348,7 @@
           for g in groups:
                 group_map[g] = [0,0]
 
+          log.info('Verifying join interface should receive multicast data')
           while True:
                 p = recv_socket.recv()
                 if p.dst in groups and group_map[p.dst][0] == 0:
@@ -296,7 +368,25 @@
 
           recv_socket.close()
           log.debug('Returning from recv task')
-          
+
+    def igmp_not_recv_task(self, intf, groups, join_state):
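+          ## Sniff the interface for a short window; return 1 if no multicast data arrives for the given groups, 2 if a packet is still seen.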
+          global NEGATIVE_TRAFFIC_STATUS
+          recv_socket = L2Socket(iface = intf, type = ETH_P_IP)
+          group_map = {}
+          for g in groups:
+                group_map[g] = [0,0]
+
+          log.info('Verifying join interface should not receive any multicast data')
+          NEGATIVE_TRAFFIC_STATUS = 1
+          def igmp_recv_cb(pkt):
+                log.info('Multicast packet %s received for left groups %s' %(pkt[IP].dst, groups))
+                global NEGATIVE_TRAFFIC_STATUS
+                NEGATIVE_TRAFFIC_STATUS = 2
+          sniff(prn = igmp_recv_cb, count = 1, lfilter = lambda p: p[IP].dst in groups,
+                timeout = 3, opened_socket = recv_socket)
+          recv_socket.close()
+          return NEGATIVE_TRAFFIC_STATUS 
+
     def group_latency_check(self, groups):
           tasks = []
           self.send_igmp_leave(groups = groups)
@@ -329,3 +419,1289 @@
           self.group_latency_check(groups)
 
           
+    def test_igmp_join_rover(self):
+          '''Keep sending joins across a multicast range of addresses (restricted to a small range for now)'''
+          s = (224 << 24) | 1
+          #e = (225 << 24) | (255 << 16) | (255 << 16) | 255
+          e = (224 << 24) | 10
+          for i in xrange(s, e+1):
+                if i&0xff:
+                      ip = '%d.%d.%d.%d'%((i>>24)&0xff, (i>>16)&0xff, (i>>8)&0xff, i&0xff)
+                      self.send_igmp_join([ip], delay = 0)
+
+    @deferred(timeout=IGMP_QUERY_TIMEOUT + 10)
+    def test_igmp_query(self):
+        groups = ['224.0.0.1'] ##igmp query group
+        df = defer.Deferred()
+        self.df = df
+        self.recv_socket = L2Socket(iface = 'veth0', type = ETH_P_IP)
+        
+        def igmp_query_timeout():
+              def igmp_query_cb(pkt):
+                    log.info('Got IGMP query packet from %s for %s' %(pkt[IP].src, pkt[IP].dst))
+                    assert_equal(pkt[IP].dst, '224.0.0.1')
+
+              sniff(prn = igmp_query_cb, count=1, lfilter = lambda p: IP in p and p[IP].dst in groups,
+                    opened_socket = self.recv_socket)
+              self.recv_socket.close()
+              self.df.callback(0)
+
+        self.send_igmp_join(groups)
+        self.test_timer = reactor.callLater(self.IGMP_QUERY_TIMEOUT, igmp_query_timeout)
+        return df
+
+    def igmp_send_joins_different_groups_srclist(self, groups, sources, intf = V_INF1, delay = 2, ip_src = None):
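+        ## Send separate IGMPv3 joins for the two groups on the same interface, each join carrying its own source list.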
+        g1 = groups[0]
+        g2 = groups[1]
+        sourcelist1 = sources[0]
+        sourcelist2 = sources[1]
+        eth = Ether(dst = self.MMACGROUP1, src = self.IGMP_SRC_MAC, type = ETH_P_IP)
+        src_ip = ip_src or self.IP_SRC
+        ip = IP(dst = g1, src = src_ip)
+        log.info('Sending join message for the group %s' %g1)
+        self.send_igmp_join((g1,), src_list = sourcelist1, ip_pkt = eth/ip, iface = intf, delay = 2)
+        eth = Ether(dst = self.MMACGROUP2, src = self.IGMP_SRC_MAC, type = ETH_P_IP)
+        ip = IP(dst = g2, src = src_ip)
+        log.info('Sending join message for group %s' %g2)
+        self.send_igmp_join((g2,), src_list = sourcelist2, ip_pkt = eth/ip, iface = intf, delay = 2)
+
+    def igmp_send_joins_different_groups_srclist_wait_query_packets(self, groups, sources, intf = V_INF1, delay = 2, ip_src = None, query_group1 = None, query_group2 = None):
+        g1 = groups[0]
+        g2 = groups[1]
+        sourcelist1 = sources[0]
+        sourcelist2 = sources[1]
+        eth = Ether(dst = self.MMACGROUP1, src = self.IGMP_SRC_MAC, type = ETH_P_IP)
+        src_ip = ip_src or self.IP_SRC
+        ip = IP(dst = g1, src = src_ip)
+        if query_group1 == 'group1':
+            log.info('Sending join message for the group %s and waiting for a query packet on join interface' %g1)
+            self.send_igmp_join_recvQuery((g1,), None, src_list = sourcelist1, ip_pkt = eth/ip, iface = intf, delay = 2)
+        else: 
+            log.info('Sending join message for the group %s' %g1)
+            self.send_igmp_join((g1,), src_list = sourcelist1, ip_pkt = eth/ip, iface = intf, delay = 2)
+        eth = Ether(dst = self.MMACGROUP2, src = self.IGMP_SRC_MAC, type = ETH_P_IP)
+        ip = IP(dst = g2, src = src_ip)
+        if query_group2 == 'group2':
+            log.info('Sending join message for the group %s and waiting for a query packet on join interface' %g2)
+            self.send_igmp_join_recvQuery((g2,), None, src_list = sourcelist2, ip_pkt = eth/ip, iface = intf, delay = 2)
+        else: 
+            log.info('Sending join message for group %s' %g2)
+            self.send_igmp_join((g2,), src_list = sourcelist2, ip_pkt = eth/ip, iface = intf, delay = 2)
+
+    def igmp_joins_leave_functionality(self, again_join = False, df = None):
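+        ## Join two groups, start multicast traffic for both, verify reception, then leave the second group and verify only its traffic stops; optionally rejoin and re-verify.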
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
+                                                      (['2.2.2.2'], ['2.2.2.2']), intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateList1 = (igmpState1, igmpStateRecv1)
+
+        igmpState2 = IGMPTestState(groups = groups2, df = df)
+        igmpStateRecv2 = IGMPTestState(groups = groups2, df = df)
+        igmpStateList2 = (igmpState2, igmpStateRecv2)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb,
+                                     arg = igmpState1)
+        dst_mac = '01:00:5e:02:02:03'
+        src_ip = '2.2.2.2'
+        mcastTraffic2 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb,
+                                     arg = igmpState2)
+        mcastTraffic1.start()
+        mcastTraffic2.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        join_state2 = IGMPTestState(groups = groups2)
+        target1 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        log.info('Interface is receiving multicast groups %s' %groups1)
+        target2 = self.igmp_recv_task(self.V_INF1, groups2, join_state2)
+        log.info('Interface is receiving multicast groups %s' %groups2)
+        log.info('Interface is sending leave message for groups %s now' %groups2)
+        self.send_igmp_leave(groups = groups2, src_list = ['2.2.2.2'], iface = self.V_INF1, delay = 2)
+        target3 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        target4 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state2)
+        assert target4 == 1, 'EXPECTED FAILURE'
+        if again_join:
+            dst_mac = '01:00:5e:02:02:03'
+            ip_dst = '239.2.2.3'
+            eth = Ether(dst = dst_mac, src = self.IGMP_SRC_MAC, type = ETH_P_IP)
+            ip = IP(dst = ip_dst, src = self.IP_SRC)
+            log.info('Interface sending join message again for the groups %s' %groups2)
+            self.send_igmp_join(groups2, src_list = [src_ip], ip_pkt = eth/ip, iface = self.V_INF1, delay = 2)
+            target5 = self.igmp_recv_task(self.V_INF1, groups2, join_state2)
+            log.info('Interface is receiving multicast groups %s again' %groups2)
+            target6 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+            log.info('Interface is still receiving from multicast groups %s' %groups1)
+        else:
+            log.info('Ended test case')
+        mcastTraffic1.stop()
+        mcastTraffic2.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
+    def test_igmp_2joins_1leave_functionality(self):
+        '''Subscribe to two channels and send a leave for one of them.'''
+        df = defer.Deferred()
+        def test_igmp_2joins_1leave():
+              self.igmp_joins_leave_functionality(again_join = False, df = df)
+              df.callback(0)
+        reactor.callLater(0, test_igmp_2joins_1leave)
+        return df
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+25)
+    def test_igmp_2joins_1leave_again_joins_functionality(self):
+        '''Subscribe to two channels, send a leave for one of them, then join the same group again.'''
+        df = defer.Deferred()
+        def test_igmp_2joins_1leave_join_again():
+              self.igmp_joins_leave_functionality(again_join = True, df = df)
+              df.callback(0)
+        reactor.callLater(0, test_igmp_2joins_1leave_join_again)
+        return df
+
+    def igmp_not_in_src_list_functionality(self, df = None):
+        '''Send multicast data from source 6.6.6.6, which is not in the join reports.'''
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
+                                                     (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '6.6.6.6'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface = 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        log.info('Interface should not receive traffic for multicast groups %s, as expected' %groups1)
+        target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s, working as expected' %groups1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
+    def test_igmp_not_in_src_list_functionality(self):
+        '''Send multicast data from source 6.6.6.6, which is not in the join reports.'''
+        df = defer.Deferred()
+        def igmp_not_in_src_list_functionality():
+              self.igmp_not_in_src_list_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_not_in_src_list_functionality)
+        return df
+
+    def igmp_change_to_exclude_src_list_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2'], iface = self.V_INF1, delay =2)
+        target2 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target2 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s after sending CHANGE_TO_EXCLUDE' %groups1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
+    def test_igmp_change_to_exclude_src_list_functionality(self):
+        '''Verify the CHANGE_TO_EXCLUDE source list functionality.'''
+        df = defer.Deferred()
+        def igmp_change_to_exclude_src_list_functionality():
+              self.igmp_change_to_exclude_src_list_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_change_to_exclude_src_list_functionality)
+        return df
+
+    def igmp_include_to_allow_src_list_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4', '6.6.6.6'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        target1 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+30)
+    def test_igmp_include_to_allow_src_list_functionality(self):
+        '''Verify that after join INCLUDE(A) and Allow(B), traffic from both the A and B source lists is received.'''
+        df = defer.Deferred()
+        def igmp_include_to_allow_src_list_functionality():
+              self.igmp_include_to_allow_src_list_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_include_to_allow_src_list_functionality)
+        return df
+
+    def igmp_include_to_block_src_list_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        self.send_igmp_leave(groups = groups1, src_list = ['6.6.6.6','7.7.7.7'],
+                             iface = self.V_INF1, delay = 2)
+        target1 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        log.info('Interface is still receiving data for the old multicast groups %s even after sending the block list' %groups1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+30)
+    def test_igmp_include_to_block_src_list_functionality(self):
+        '''Verify that after join INCLUDE(A) and Block(B), traffic from the A source list is still received.'''
+        df = defer.Deferred()
+        def igmp_include_to_block_src_list_functionality():
+              self.igmp_include_to_block_src_list_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_include_to_block_src_list_functionality)
+        return df
+
+
+    def igmp_change_to_include_src_list_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
+                             iface = self.V_INF1, delay = 2)
+        
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s' %groups1)
+        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['6.6.6.6', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        target2 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        log.info('Interface is receiving from multicast groups %s after sending the CHANGE_TO_INCLUDE message' %groups1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
+    def test_igmp_change_to_include_src_list_functionality(self):
+        '''Verify the CHANGE_TO_INCLUDE source list functionality.'''
+        df = defer.Deferred()
+        def igmp_change_to_include_src_list_functionality():
+              self.igmp_change_to_include_src_list_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_change_to_include_src_list_functionality)
+        return df
+
+    def igmp_exclude_to_allow_src_list_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
+                             iface = self.V_INF1, delay = 2)
+        
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s' %groups1)
+        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
+                                                      (['6.6.6.6', '7.7.7.7', '8.8.8.8'], ['6.6.6.6', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s' %groups1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
+    def test_igmp_exclude_to_allow_src_list_functionality(self):
+        '''Verify join EXCLUDE(A) and Allow(B) from both A and B source list, should receive multicast traffic. '''
+        df = defer.Deferred()
+        def igmp_exclude_to_allow_src_list_functionality():
+              self.igmp_exclude_to_allow_src_list_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_exclude_to_allow_src_list_functionality)
+        return df
+
+    def igmp_exclude_to_block_src_list_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
+                             iface = self.V_INF1, delay = 2)
+        
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s' %groups1)
+        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4', '5.5.5.5', '7.7.7.7'],
+                             iface = self.V_INF1, delay = 2)
+        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s' %groups1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
+    def test_igmp_exclude_to_block_src_list_functionality(self):
+        ''' Verify join EXCLUDE (A) and Block(B) from A source list, should receive multicast traffic. '''
+        df = defer.Deferred()
+        def igmp_exclude_to_block_src_list_functionality():
+              self.igmp_exclude_to_block_src_list_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_exclude_to_block_src_list_functionality)
+        return df
+
+    def igmp_new_src_list_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.igmp_send_joins_different_groups_srclist(groups1+groups2,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '6.6.6.6'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s' %groups1)
+        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
+                                                      (['2.2.2.2', '6.6.6.6', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        target2 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        log.info('Interface is receiving from multicast groups %s after sending join with new source list' %groups1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+10)
+    def test_igmp_new_src_list_functionality(self):
+        ## '''This test checks that a new source list can be added to an existing source list '''
+        df = defer.Deferred()
+        def igmp_new_src_list_functionality():
+              self.igmp_new_src_list_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_new_src_list_functionality)
+        return df
+
+    def igmp_block_old_src_list_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        groups = groups1 + groups2
+        self.igmp_send_joins_different_groups_srclist(groups,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:02:02:03'
+        src_ip = '5.5.5.5'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups2, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups2, df = df)
+        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups2)
+        target1 = self.igmp_recv_task(self.V_INF1, groups2, join_state1)
+        log.info('Interface is receiving from multicast groups %s' %groups2)
+        self.igmp_send_joins_different_groups_srclist(groups,
+                                                      (['6.6.6.6', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '7.7.7.7']),
+                                                      intf = self.V_INF1, delay = 2)
+        target2 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
+        assert target2 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s after sending join with block old source list' %groups2)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
+    def test_igmp_block_old_src_list_functionality(self):
+        ## '''This test checks that old sources can be blocked from an existing source list '''
+        df = defer.Deferred()
+        def igmp_block_old_src_list_functionality():
+              self.igmp_block_old_src_list_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_block_old_src_list_functionality)
+        return df
+
+    def igmp_include_empty_src_list_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        groups = groups1 + groups2
+        self.igmp_send_joins_different_groups_srclist(groups,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['']),
+                                                      intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:02:02:03'
+        src_ip = '5.5.5.5'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups2, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups2, df = df)
+        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups2)
+        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
+        assert target1==1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s after sending a join with an empty source list' %groups2)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
+    def ztest_igmp_include_empty_src_list_functionality(self):
+        ## '''This test checks multicast functionality when an INCLUDE join is sent with an empty source list '''
+        ## '''Disabled because scapy IGMP does not work with empty source lists'''
+        df = defer.Deferred()
+        def igmp_include_empty_src_list_functionality():
+              self.igmp_include_empty_src_list_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_include_empty_src_list_functionality)
+        return df
+
+    def igmp_exclude_empty_src_list_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        groups = groups1 + groups2
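+        # An exclude-style report with an empty source list is intended to mean "exclude
+        # nothing", i.e. receive from all sources, so traffic for groups2 should be received.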
+        self.send_igmp_leave(groups = groups2, src_list = [''], iface = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:02:02:03'
+        src_ip = '5.5.5.5'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups2, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups2, df = df)
+        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups2)
+        target1 = self.igmp_recv_task(self.V_INF1, groups2, join_state1)
+        log.info('Interface is receiving multicast groups %s' %groups2)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
+    def ztest_igmp_exclude_empty_src_list_functionality(self):
+        ## '''This test checks multicast functionality when an EXCLUDE report is sent with an empty source list '''
+        ## '''Disabled because scapy IGMP does not work with empty source lists'''
+        df = defer.Deferred()
+        def igmp_exclude_empty_src_list_functionality():
+              self.igmp_exclude_empty_src_list_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_exclude_empty_src_list_functionality)
+        return df
+
+    def igmp_join_sourceip_0_0_0_0_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        groups = groups1 + groups2
+        ip_src = '0.0.0.0'
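+        # The join reports below are sent with 0.0.0.0 as the IP source address; the groups
+        # should still be accepted and traffic for groups2 received.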
+        self.igmp_send_joins_different_groups_srclist(groups,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
+        ip_src = self.IP_SRC
+        dst_mac = '01:00:5e:02:02:03'
+        src_ip = '5.5.5.5'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups2, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups2, df = df)
+        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups2)
+        target1 = self.igmp_recv_task(self.V_INF1, groups2, join_state1)
+        log.info('Interface is receiving from multicast groups %s after sending join with source IP 0.0.0.0' %groups2)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
+    def test_igmp_join_sourceip_0_0_0_0_functionality(self):
+        ## '''This test sends a join with source list A,B,C and exclude D,F,G, using 0.0.0.0 as the source IP'''
+        df = defer.Deferred()
+        def igmp_join_sourceip_0_0_0_0_functionality():
+              self.igmp_join_sourceip_0_0_0_0_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_join_sourceip_0_0_0_0_functionality)
+        return df
+
+
+    def igmp_invalid_join_packet_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MINVALIDGROUP1,)
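+        # groups2 uses an invalid multicast group address, so the join for it should be
+        # ignored and no traffic received for that group.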
+        groups = groups1 + groups2
+        ip_src = '1.1.1.1'
+        self.igmp_send_joins_different_groups_srclist(groups,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
+        ip_src = self.IP_SRC
+        dst_mac = '01:00:5e:02:02:03'
+        src_ip = '5.5.5.5'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups2, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups2, df = df)
+        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups2)
+        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
+        assert target1==1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s after sending an invalid join packet' %groups2)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
+    def test_igmp_invalid_join_packet_functionality(self):
+        ## '''This test sends an invalid join with source list A,B,C and exclude D,F,G, using 255.255.255.255 as the multicast group'''
+        df = defer.Deferred()
+        def igmp_invalid_join_packet_functionality():
+              self.igmp_invalid_join_packet_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_invalid_join_packet_functionality)
+        return df
+
+    def igmp_join_data_receiving_during_subscriber_link_down_up_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        groups = groups1 + groups2
+        ip_src = '1.1.1.1'
+        self.igmp_send_joins_different_groups_srclist(groups,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
+        ip_src = self.IP_SRC
+        dst_mac = '01:00:5e:02:02:03'
+        src_ip = '5.5.5.5'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups2, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups2, df = df)
+        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups2)
+        target1 = self.igmp_recv_task(self.V_INF1, groups2, join_state1)
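+        # Flap the subscriber interface: bring self.V_INF1 down, wait 10 seconds, bring it
+        # back up, then verify that multicast reception resumes.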
+        log.info('Interface is receiving from multicast groups before bringing down the subscriber interface %s' %self.V_INF1)
+        os.system('ifconfig '+self.V_INF1+' down')
+        log.info('The subscriber interface %s is down now' %self.V_INF1)
+        os.system('ifconfig '+self.V_INF1)
+        time.sleep(10)
+        os.system('ifconfig '+self.V_INF1+' up')
+        os.system('ifconfig '+self.V_INF1)
+        log.info('The subscriber interface %s is up now' %self.V_INF1)
+        target1 = self.igmp_recv_task(self.V_INF1, groups2, join_state1)
+        log.info('Interface is receiving from multicast groups %s after the interface was brought back up' %groups2)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
+    def test_igmp_join_data_receiving_during_subscriber_link_up_down_functionality(self):
+        ## '''This test sends a join with source list A,B,C and exclude D,F,G for a valid multicast group, then brings the data-receiving (subscriber) port down and back up while traffic is flowing'''
+        df = defer.Deferred()
+        def igmp_join_data_receiving_during_subscriber_link_up_down_functionality():
+              self.igmp_join_data_receiving_during_subscriber_link_down_up_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_join_data_receiving_during_subscriber_link_up_down_functionality)
+        return df
+
+    def igmp_join_data_receiving_during_channel_distributor_link_up_down_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        groups = groups1 + groups2
+        ip_src = '1.1.1.1'
+        self.igmp_send_joins_different_groups_srclist(groups,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5', '6.6.6.6']),
+                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
+        ip_src = self.IP_SRC
+        dst_mac1 = '01:00:5e:01:02:03'
+        dst_mac2 = '01:00:5e:02:02:03'
+        src_ip2 = '5.5.5.5'
+        src_ip1 = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpState2 = IGMPTestState(groups = groups2, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv2 = IGMPTestState(groups = groups2, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac1,
+                                     src_ip = src_ip1, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic2 = McastTraffic(groups2, iface= 'veth3', dst_mac = dst_mac2,
+                                     src_ip = src_ip2,  cb = self.send_mcast_cb, arg = igmpState2)
+        mcastTraffic1.start()
+        mcastTraffic2.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        join_state2 = IGMPTestState(groups = groups2)
+        target1 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        target2 = self.igmp_recv_task(self.V_INF1, groups2, join_state2)
+        log.info('Interface is receiving from multicast groups before bringing down veth2, while the subscriber link %s stays up' %self.V_INF1)
+        mcastTraffic1.stop()
+        os.system('ifconfig '+'veth2'+' down')
+        log.info('The channel distributor interface veth2 is down now')
+        os.system('ifconfig '+'veth2')
+        time.sleep(10)
+        log.info('Verifying that the interface is still receiving multicast group %s traffic even though the sending interface for the other group is down' %groups2)
+        target2 = self.igmp_recv_task(self.V_INF1, groups2, join_state2)
+        log.info('Verified that the interface is still receiving multicast group %s traffic even though the sending interface for the other group is down' %groups2)
+        target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1==1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups1 %s after the traffic-sending interface veth2 was brought down' %groups1)
+        os.system('ifconfig '+'veth2'+' up')
+        os.system('ifconfig '+'veth2')
+        log.info('The channel distributor interface veth2 is up now')
+        time.sleep(10)
+        mcastTraffic1.start()
+        log.info('Verifying that the interface is receiving data from both multicast groups %s after the interface was brought back up' %groups2)
+        target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        target2 = self.igmp_recv_task(self.V_INF1, groups2, join_state2)
+        target2 = self.igmp_recv_task(self.V_INF1, groups2, join_state2)
+        log.info('Interface is receiving from multicast groups %s after the interface was brought back up' %groups2)
+        mcastTraffic2.stop()
+        self.onos_ctrl.deactivate()
+    ##  This test case is failing to receive multicast data traffic from different channel interfaces. TO-DO
+    ###### TO DO scenario #######
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+60)
+    def ztest_igmp_join_data_receiving_during_channel_distributor_link_down_up_functionality(self):
+        ## '''This test sends a join with source list A,B,C for a valid multicast group, then shuts down the data-sending port while traffic is flowing'''
+        df = defer.Deferred()
+        def igmp_join_data_receiving_during_channel_distributor_link_down_up_functionality():
+              self.igmp_join_data_receiving_during_channel_distributor_link_down_up_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_join_data_receiving_during_channel_distributor_link_down_up_functionality)
+        return df
+
+    def igmp_invalidClassD_IP_join_packet_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MINVALIDGROUP2,)
+        groups = groups1 + groups2
+        ip_src = '1.1.1.1'
+        self.igmp_send_joins_different_groups_srclist(groups,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
+        ip_src = self.IP_SRC
+        dst_mac = '01:00:5e:02:02:03'
+        src_ip = '5.5.5.5'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups2, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups2, df = df)
+        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups2)
+        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
+        assert target1==1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s after sending an invalid join packet' %groups2)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
+    def test_igmp_invalidClassD_IP_join_packet_functionality(self):
+        ## '''This test sends an invalid join with source list A,B,C, using 239.255.255.255 as the multicast group'''
+        df = defer.Deferred()
+        def igmp_invalidClassD_IP_join_packet_functionality():
+              self.igmp_invalidClassD_IP_join_packet_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_invalidClassD_IP_join_packet_functionality)
+        return df
+
+    def igmp_invalidClassD_IP_as_srclistIP_join_packet_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        groups = groups1 + groups2
+        ip_src = '1.1.1.1'
+        self.igmp_send_joins_different_groups_srclist(groups,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['239.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
+        ip_src = self.IP_SRC
+        dst_mac = '01:00:5e:02:02:03'
+        src_ip = '5.5.5.5'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups2, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups2, df = df)
+        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups2)
+        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
+        assert target1==1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s after sending an invalid join packet' %groups2)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+20)
+    def test_igmp_invalidClassD_IP_as_srclistIP_join_packet_functionality(self):
+        ## '''This test sends an invalid join whose source list contains the class D address 239.5.5.5'''
+        df = defer.Deferred()
+        def igmp_invalidClassD_IP_as_srclistIP_join_packet_functionality():
+              self.igmp_invalidClassD_IP_as_srclistIP_join_packet_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_invalidClassD_IP_as_srclistIP_join_packet_functionality)
+        return df
+
+
+    def igmp_general_query_recv_packet_functionality(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        groups = groups1 + groups2
+        ip_src = '1.1.1.1'
+        self.igmp_send_joins_different_groups_srclist(groups,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2, ip_src = ip_src)
+        ip_src = self.IP_SRC
+        dst_mac = '01:00:5e:02:02:03'
+        src_ip = '5.5.5.5'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups2, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups2, df = df)
+        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups2)
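+        # No reports are sent in response to the periodic general queries below; reception
+        # is sampled at roughly 100, 150, 180 and 190 seconds, after which the sender should
+        # still be transmitting while the joined port should have aged out.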
+        log.info('Started delay to verify whether multicast data traffic for group %s is still received over the next 180 sec' %groups2)
+        time.sleep(100)
+        target2 = self.igmp_recv_task(self.V_INF1, groups2, join_state1)
+        log.info('Verified that  multicast data for group %s is received after 100 sec ' %groups2)
+        time.sleep(50)
+        target2 = self.igmp_recv_task(self.V_INF1, groups2, join_state1)
+        log.info('Verified that  multicast data for group %s is received after 150 sec ' %groups2)
+        time.sleep(30)
+        target2 = self.igmp_recv_task(self.V_INF1, groups2, join_state1)
+        log.info('Verified that  multicast data for group %s is received after 180 sec ' %groups2)
+        time.sleep(10)
+        target2 = self.igmp_recv_task(self.V_INF1, groups2, join_state1)
+        log.info('Verified that  multicast data for group %s is received after 190 sec ' %groups2)
+        target3 = mcastTraffic1.isRecvStopped()
+        assert target3==False, 'EXPECTED FAILURE'
+        log.info('Verified that multicast data for group %s is still being transmitted from the data interface' %groups2)
+        log.info('Now checking whether the joining interface is still receiving multicast data for group %s after 190 sec' %groups2)
+        target1 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
+        assert target1==1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving multicast data for group %s' %groups2)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+250)
+    def test_igmp_general_query_recv_packet_traffic_functionality(self):
+        ## '''This test verifies that multicast data stops after 180 sec if we do not respond to the general query packets generated by the querier router'''
+        df = defer.Deferred()
+        def igmp_general_query_recv_packet_functionality():
+              self.igmp_general_query_recv_packet_functionality(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_general_query_recv_packet_functionality)
+        return df
+    
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+80)
+    def test_igmp_query_packet_received_on_joining_interface(self):
+        ## '''This test verifies that the joining interface receives a general membership query packet from the querier router '''
+        groups = ['224.0.1.10', '225.0.0.10']
+        leave_groups = ['224.0.1.10']
+        df = defer.Deferred()
+        igmpState = IGMPTestState(groups = groups, df = df)
+        igmpStateRecv = IGMPTestState(groups = groups, df = df)
+        igmpStateList = (igmpState, igmpStateRecv)
+        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb, 
+                                    arg = igmpState)
+        self.df = df
+        self.mcastTraffic = mcastTraffic
+        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
+        
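+        # igmp_srp_task drains received multicast packets until the traffic sender stops,
+        # then closes the receive socket, verifies the leave and fires the deferred.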
+        def igmp_srp_task(stateList):
+            igmpSendState, igmpRecvState = stateList
+            if not mcastTraffic.isRecvStopped():
+                result = self.igmp_recv(igmpRecvState)
+                reactor.callLater(0, igmp_srp_task, stateList)
+            else:
+                self.mcastTraffic.stop()
+                self.recv_socket.close()
+                self.igmp_verify_leave(stateList, leave_groups)
+                self.df.callback(0)
+
+        log.info('Sending join packet and expecting to receive a general query packet after 60 sec for multicast %s ' %groups)
+        self.send_igmp_join_recvQuery(groups)
+        log.info('Received a general query packet for multicast %s group on the joining interface, now sending traffic' %groups)
+        mcastTraffic.start()
+        self.test_timer = reactor.callLater(self.MCAST_TRAFFIC_TIMEOUT, self.mcast_traffic_timer)
+        reactor.callLater(0, igmp_srp_task, igmpStateList)
+        return df
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+190)
+    def test_igmp_periodic_query_packet_received_on_joining_interface(self):
+        ## '''This test verifies that the joining interface receives periodic general membership query packets from the querier router '''
+        groups = ['224.0.1.10', '225.0.0.10']
+        leave_groups = ['224.0.1.10']
+        df = defer.Deferred()
+        igmpState = IGMPTestState(groups = groups, df = df)
+        igmpStateRecv = IGMPTestState(groups = groups, df = df)
+        igmpStateList = (igmpState, igmpStateRecv)
+        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb, 
+                                    arg = igmpState)
+        self.df = df
+        self.mcastTraffic = mcastTraffic
+        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
+        
+        def igmp_srp_task(stateList):
+            igmpSendState, igmpRecvState = stateList
+            if not mcastTraffic.isRecvStopped():
+                result = self.igmp_recv(igmpRecvState)
+                reactor.callLater(0, igmp_srp_task, stateList)
+            else:
+                self.mcastTraffic.stop()
+                self.recv_socket.close()
+                self.igmp_verify_leave(stateList, leave_groups)
+                self.df.callback(0)
+
+        self.send_igmp_join_recvQuery(groups,3)
+        return df
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+190)
+    def test_igmp_periodic_query_packet_received_and_checking_entry_deleted(self):
+        ## '''This test verifies that the joining interface receives periodic general membership query packets from the querier router, that the membership interval expires, and that traffic is then no longer received'''
+        groups = ['224.0.1.10', '225.0.0.10']
+        leave_groups = ['224.0.1.10']
+        df = defer.Deferred()
+        igmpState = IGMPTestState(groups = groups, df = df)
+        igmpStateRecv = IGMPTestState(groups = groups, df = df)
+        igmpStateList = (igmpState, igmpStateRecv)
+        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb, 
+                                    arg = igmpState)
+        self.df = df
+        self.mcastTraffic = mcastTraffic
+        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
+        
+        def igmp_srp_task(stateList):
+            igmpSendState, igmpRecvState = stateList
+            if not mcastTraffic.isRecvStopped():
+                result = self.igmp_recv(igmpRecvState)
+                reactor.callLater(0, igmp_srp_task, stateList)
+            else:
+                self.mcastTraffic.stop()
+                self.recv_socket.close()
+                self.igmp_verify_leave(stateList, leave_groups)
+                self.df.callback(0)
+
+        self.send_igmp_join_recvQuery(groups,3)
+        log.info('Received periodic general query packets for multicast %s, now checking whether the entry is deleted from the table by sending traffic for that group' %groups)
+        mcastTraffic.start()
+        self.test_timer = reactor.callLater(self.MCAST_TRAFFIC_TIMEOUT, self.mcast_traffic_timer)
+        reactor.callLater(0, igmp_srp_task, igmpStateList)
+        return df
+
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+190)
+    def test_igmp_member_query_interval_expire_re_joining_interface(self):
+        ## '''This test verifies that the joining interface still receives multicast data after the group membership interval expires, by sending a join again for that multicast group'''
+        groups = ['224.0.1.10', '225.0.0.10']
+        leave_groups = ['224.0.1.10']
+        df = defer.Deferred()
+        igmpState = IGMPTestState(groups = groups, df = df)
+        igmpStateRecv = IGMPTestState(groups = groups, df = df)
+        igmpStateList = (igmpState, igmpStateRecv)
+        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb, 
+                                    arg = igmpState)
+        self.df = df
+        self.mcastTraffic = mcastTraffic
+        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
+        
+        def igmp_srp_task(stateList):
+            igmpSendState, igmpRecvState = stateList
+            if not mcastTraffic.isRecvStopped():
+                result = self.igmp_recv(igmpRecvState)
+                reactor.callLater(0, igmp_srp_task, stateList)
+            else:
+                self.mcastTraffic.stop()
+                self.recv_socket.close()
+                self.igmp_verify_leave(stateList, leave_groups)
+                self.df.callback(0)
+
+        self.send_igmp_join_recvQuery(groups,3)
+        log.info('Received periodic general query packets for multicast %s, now sending the join packet again and verifying whether traffic for that group is received on the joining interface' %groups)
+        self.send_igmp_join(groups)
+        mcastTraffic.start()
+        self.test_timer = reactor.callLater(self.MCAST_TRAFFIC_TIMEOUT, self.mcast_traffic_timer)
+        reactor.callLater(0, igmp_srp_task, igmpStateList)
+        return df
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+50)
+    def test_igmp_leave_verify_received_group_source_specific_query(self):
+        ## '''This test verifies that when we send a leave message we receive a group-and-source-specific query '''
+        groups = ['224.0.1.10', '225.0.0.10']
+        leave_groups = ['224.0.1.10']
+        df = defer.Deferred()
+        igmpState = IGMPTestState(groups = groups, df = df)
+        igmpStateRecv = IGMPTestState(groups = groups, df = df)
+        igmpStateList = (igmpState, igmpStateRecv)
+        mcastTraffic = McastTraffic(groups, iface= 'veth2', cb = self.send_mcast_cb, 
+                                    arg = igmpState)
+        self.df = df
+        self.mcastTraffic = mcastTraffic
+        self.recv_socket = L3PacketSocket(iface = 'veth0', type = ETH_P_IP)
+        
+        def igmp_srp_task(stateList):
+            igmpSendState, igmpRecvState = stateList
+            if not mcastTraffic.isRecvStopped():
+                result = self.igmp_recv(igmpRecvState)
+                reactor.callLater(0, igmp_srp_task, stateList)
+            else:
+                self.mcastTraffic.stop()
+                self.recv_socket.close()
+                self.igmp_verify_leave(stateList, leave_groups)
+                self.df.callback(0)
+
+        self.send_igmp_join(groups)
+        self.send_igmp_leave_listening_group_specific_query(leave_groups, delay = 3)
+        return df
+
+    def igmp_change_to_exclude_src_list_check_for_group_source_specific_query(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        self.send_igmp_leave_listening_group_specific_query(groups = groups1, src_list = ['2.2.2.2'], iface = self.V_INF1, delay =2)
+        time.sleep(10)
+        target2 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target2 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s after sending CHANGE_TO_EXCLUDE' %groups1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+60)
+    def test_igmp_change_to_exclude_src_list_check_for_group_source_specific_query(self):
+        '''Verify join INCLUDE (A) and TO_EX(B) for a multicast group, 
+           we should receive group source specific membership query packet to A*B source list interface'''
+        df = defer.Deferred()
+        def igmp_change_to_exclude_src_list_check_for_group_source_specific_query():
+              self.igmp_change_to_exclude_src_list_check_for_group_source_specific_query(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_change_to_exclude_src_list_check_for_group_source_specific_query)
+        return df
+
+    def igmp_change_to_include_src_list_check_for_general_query(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
+                             iface = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s' %groups1)
+        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups1 + groups2,
+                                                   (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['6.6.6.6', '5.5.5.5']),
+                                                   intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
+        time.sleep(10)
+        target2 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        log.info('Interface is receiving from multicast groups %s after send Change to include message' %groups1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+80)
+    def test_igmp_change_to_include_src_list_check_for_general_query(self):
+        '''Verify join EXCLUDE (A) and TO_IN(B) for a multicast group, 
+        we should receive general membership query packet. '''
+        df = defer.Deferred()
+        def igmp_change_to_include_src_list_check_for_general_query():
+              self.igmp_change_to_include_src_list_check_for_general_query(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_change_to_include_src_list_check_for_general_query)
+        return df
+
+    def igmp_allow_new_src_list_check_for_general_query(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.igmp_send_joins_different_groups_srclist(groups1+groups2,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '6.6.6.6'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1 = self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        #assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s' %groups1)
+        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups1 + groups2,
+                                                      (['2.2.2.2', '6.6.6.6', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
+        target2 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        log.info('Interface is receiving from multicast groups %s after sending join with new source list' %groups1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+80)
+    def test_igmp_allow_new_src_list_check_for_general_query(self):
+        '''Verify join INCLUDE (A) and ALLOW(B) for a multicast group, we 
+           should receive general membership query packet. '''
+        df = defer.Deferred()
+        def igmp_allow_new_src_list_check_for_general_query():
+              self.igmp_allow_new_src_list_check_for_general_query(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_allow_new_src_list_check_for_general_query)
+        return df
+
+    def igmp_block_old_src_list_check_for_group_source_specific_query(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        groups = groups1 + groups2
+        self.igmp_send_joins_different_groups_srclist(groups,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:02:02:03'
+        src_ip = '5.5.5.5'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups2, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups2, df = df)
+        mcastTraffic1 = McastTraffic(groups2, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups2)
+        target1 = self.igmp_recv_task(self.V_INF1, groups2, join_state1)
+        log.info('Interface is receiving from multicast groups %s' %groups2)
+        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups,
+                                                (['6.6.6.6', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '7.7.7.7']),
+                                                intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
+        target2 = self.igmp_not_recv_task(self.V_INF1, groups2, join_state1)
+        assert target2 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s after sending join with block old source list' %groups2)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+90)
+    def test_igmp_block_old_src_list_check_for_group_source_specific_query(self):
+        """ Verify when we send join INCLUDE (A) and BLOCK(B) for a multicast group, we should receive 
+            group source specific membership query packet to A*B source list interface""" 
+        df = defer.Deferred()
+        def igmp_block_old_src_list_check_for_group_source_specific_query():
+              self.igmp_block_old_src_list_check_for_group_source_specific_query(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_block_old_src_list_check_for_group_source_specific_query)
+        return df
+
+    def igmp_include_to_allow_src_list_check_for_general_query(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups1 + groups2,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4', '6.6.6.6'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
+        target1 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+40)
+    def test_igmp_include_to_allow_src_list_check_for_general_query(self):
+        '''Verify join INCLUDE (A) and Allow(B) ,should receive general membership query packet '''
+        df = defer.Deferred()
+        def igmp_include_to_allow_src_list_check_for_general_query():
+              self.igmp_include_to_allow_src_list_check_for_general_query(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_include_to_allow_src_list_check_for_general_query)
+        return df
+
+    def igmp_include_to_block_src_list_check_for_group_source_specific_query(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.igmp_send_joins_different_groups_srclist(groups1 + groups2,
+                                                      (['2.2.2.2', '3.3.3.3', '4.4.4.4'], ['2.2.2.2', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2)
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        self.send_igmp_leave_listening_group_specific_query(groups = groups1, src_list = ['6.6.6.6','7.7.7.7'],
+                             iface = self.V_INF1, delay = 2)
+        target1 = self.igmp_recv_task(self.V_INF1, groups1, join_state1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+40)
+    def test_igmp_include_to_block_src_list_check_for_group_source_specific_query(self):
+        '''Verify join INCLUDE (A) and Block(B) ,should receive group source specific membership query packet. '''
+        df = defer.Deferred()
+        def igmp_include_to_block_src_list_check_for_group_source_specific_query():
+              self.igmp_include_to_block_src_list_check_for_group_source_specific_query(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_include_to_block_src_list_check_for_group_source_specific_query)
+        return df
+
+    def igmp_exclude_to_allow_src_list_check_for_general_query(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
+                             iface = self.V_INF1, delay = 2)
+        
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s' %groups1)
+        self.igmp_send_joins_different_groups_srclist_wait_query_packets(groups1 + groups2,
+                                                      (['6.6.6.6', '7.7.7.7', '8.8.8.8'], ['6.6.6.6', '5.5.5.5']),
+                                                      intf = self.V_INF1, delay = 2, query_group1 = 'group1', query_group2 = None)
+        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s' %groups1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+90)
+    def test_igmp_exclude_to_allow_src_list_check_for_general_query(self):
+        '''Verify join EXCLUDE(A) and Allow(B) ,should receive general membership query packet. '''
+        df = defer.Deferred()
+        def igmp_exclude_to_allow_src_list_check_for_general_query():
+              self.igmp_exclude_to_allow_src_list_check_for_general_query(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_exclude_to_allow_src_list_check_for_general_query)
+        return df
+
+    def igmp_exclude_to_block_src_list_check_for_group_source_specific_query(self, df = None):
+        groups1 = (self.MGROUP1,)
+        groups2 = (self.MGROUP2,)
+        self.send_igmp_leave(groups = groups1, src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4'],
+                             iface = self.V_INF1, delay = 2)
+        
+        dst_mac = '01:00:5e:01:02:03'
+        src_ip = '2.2.2.2'
+        if df is None:
+              df = defer.Deferred()
+        igmpState1 = IGMPTestState(groups = groups1, df = df)
+        igmpStateRecv1 = IGMPTestState(groups = groups1, df = df)
+        mcastTraffic1 = McastTraffic(groups1, iface= 'veth2', dst_mac = dst_mac,
+                                     src_ip = src_ip, cb = self.send_mcast_cb, arg = igmpState1)
+        mcastTraffic1.start()
+        join_state1 = IGMPTestState(groups = groups1)
+        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s' %groups1)
+        self.send_igmp_leave_listening_group_specific_query(groups = groups1, 
+                                          src_list = ['2.2.2.2', '3.3.3.3', '4.4.4.4', '5.5.5.5', '7.7.7.7'],
+                                          iface = self.V_INF1, delay = 2)
+        target1= self.igmp_not_recv_task(self.V_INF1, groups1, join_state1)
+        assert target1 == 1, 'EXPECTED FAILURE'
+        log.info('Interface is not receiving from multicast groups %s' %groups1)
+        mcastTraffic1.stop()
+        self.onos_ctrl.deactivate()
+
+    @deferred(timeout=MCAST_TRAFFIC_TIMEOUT+40)
+    def test_igmp_exclude_to_block_src_list_check_for_group_source_specific_query(self):
+        '''Verify join EXCLUDE (A) and Block(B) ,should receive group source specific membership query packet.'''
+        df = defer.Deferred()
+        def igmp_exclude_to_block_src_list_check_for_group_source_specific_query():
+              self.igmp_exclude_to_block_src_list_check_for_group_source_specific_query(df = df)
+              df.callback(0)
+        reactor.callLater(0, igmp_exclude_to_block_src_list_check_for_group_source_specific_query)
+        return df
+
+
diff --git a/src/test/md5/__init__.py b/src/test/md5/__init__.py
new file mode 100644
index 0000000..a881eb6
--- /dev/null
+++ b/src/test/md5/__init__.py
@@ -0,0 +1,7 @@
+import os,sys
+##add the python path to lookup the utils
+working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
+utils_dir = os.path.join(working_dir, '../utils')
+fsm_dir = os.path.join(working_dir, '../fsm')
+__path__.append(utils_dir)
+__path__.append(fsm_dir)
diff --git a/src/test/md5/md5AuthTest.py b/src/test/md5/md5AuthTest.py
new file mode 100644
index 0000000..d34f14d
--- /dev/null
+++ b/src/test/md5/md5AuthTest.py
@@ -0,0 +1,20 @@
+import unittest
+import os,sys
+from EapMD5 import MD5AuthTest
+
+class eap_auth_exchange(unittest.TestCase):
+      def test_eap_md5(self):
+          t = MD5AuthTest()
+          t.runTest()
+      def test_eap_md5_wrg_password(self):
+          t =  MD5AuthTest()
+          t._wrong_password()
+          t.runTest()
+
+if __name__ == '__main__':
+          t =  MD5AuthTest()
+          t.runTest()
+          ####### Start the EAP-MD5 Negative testcase 
+          t._wrong_password()
+          t.runTest()
+
diff --git a/src/test/onosCli/__init__.py b/src/test/onosCli/__init__.py
new file mode 100644
index 0000000..93253fd
--- /dev/null
+++ b/src/test/onosCli/__init__.py
@@ -0,0 +1,9 @@
+import os,sys
+##add the python path to lookup the utils
+working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
+utils_dir = os.path.join(working_dir, '../utils')
+fsm_dir = os.path.join(working_dir, '../fsm')
+cli_dir = os.path.join(working_dir, '../cli')
+__path__.append(utils_dir)
+__path__.append(fsm_dir)
+__path__.append(cli_dir)
diff --git a/src/test/onosCli/onosCliTest.py b/src/test/onosCli/onosCliTest.py
new file mode 100644
index 0000000..039a9b9
--- /dev/null
+++ b/src/test/onosCli/onosCliTest.py
@@ -0,0 +1,36 @@
+import unittest
+import time
+import os
+import json
+from nose.tools import *
+from onosclidriver import OnosCliDriver
+from OnosCtrl import OnosCtrl
+from scapy.all import *
+
+log.setLevel('INFO')
+
+class routes_exchange(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        cls.cli = OnosCliDriver(connect = True)
+
+    @classmethod
+    def tearDownClass(cls):
+        cls.cli.disconnect()
+
+    def test_route_cli(self):
+        routes = json.loads(self.cli.routes(jsonFormat = True))
+        log.info('Routes: %s' %routes)
+
+    def test_devices_cli(self):
+        devices = json.loads(self.cli.devices(jsonFormat = True))
+        available_devices = filter(lambda d: d['available'], devices)
+        device_ids = [ d['id'] for d in devices ]
+        log.info('Available Devices: %s' %available_devices)
+        log.info('Device IDS: %s' %device_ids)
+
+    def test_flows_cli(self):
+        flows = json.loads(self.cli.flows(jsonFormat = True))
+        flows = filter(lambda f: f['flows'], flows)
+        log.info('Flows: %s' %flows)
diff --git a/src/test/pap/__init__.py b/src/test/pap/__init__.py
new file mode 100644
index 0000000..a881eb6
--- /dev/null
+++ b/src/test/pap/__init__.py
@@ -0,0 +1,7 @@
+import os,sys
+##add the python path to lookup the utils
+working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
+utils_dir = os.path.join(working_dir, '../utils')
+fsm_dir = os.path.join(working_dir, '../fsm')
+__path__.append(utils_dir)
+__path__.append(fsm_dir)
diff --git a/src/test/pap/papTest.py b/src/test/pap/papTest.py
index 3ce25f9..5c9018a 100644
--- a/src/test/pap/papTest.py
+++ b/src/test/pap/papTest.py
@@ -1,8 +1,6 @@
 import unittest
 import os,sys
-CORD_TEST_UTILS = 'utils'
-test_root = os.getenv('CORD_TEST_ROOT') or './'
-sys.path.append(test_root + CORD_TEST_UTILS)
+from nose.tools import assert_equal
 from EapPAP import PAPAuthTest
 
 class eap_auth_exchange(unittest.TestCase):
diff --git a/src/test/setup/cord-test.py b/src/test/setup/cord-test.py
new file mode 100755
index 0000000..71bfa3e
--- /dev/null
+++ b/src/test/setup/cord-test.py
@@ -0,0 +1,289 @@
+#!/usr/bin/env python
+from argparse import ArgumentParser
+import os,sys,time
+utils_dir = os.path.join( os.path.dirname(os.path.realpath(__file__)), '../utils')
+sys.path.append(utils_dir)
+from OnosCtrl import OnosCtrl
+from OltConfig import OltConfig
+from CordContainer import *
+from CordTestServer import cord_test_server_start, cord_test_server_stop
+
+class CordTester(Container):
+    sandbox = '/root/test'
+    sandbox_setup = '/root/test/src/test/setup'
+    tester_base = os.path.dirname(os.path.realpath(__file__))
+    tester_paths = os.path.realpath(__file__).split(os.path.sep)
+    tester_path_index = tester_paths.index('cord-tester')
+    sandbox_host = os.path.sep.join(tester_paths[:tester_path_index+1])
+
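+    # host_guest_map bind-mounts the repository checkout, the host kernel modules and the
+    # docker socket into the test container (modules for OVS, docker.sock so the tests can
+    # reach the host docker daemon from inside the container).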
+    host_guest_map = ( (sandbox_host, sandbox),
+                       ('/lib/modules', '/lib/modules'),
+                       ('/var/run/docker.sock', '/var/run/docker.sock')
+                       )
+    basename = 'cord-tester'
+
+    def __init__(self, ctlr_ip = None, image = 'cord-test/nose', tag = 'latest',
+                 env = None, rm = False, update = False):
+        self.ctlr_ip = ctlr_ip
+        self.rm = rm
+        self.name = self.get_name()
+        super(CordTester, self).__init__(self.name, image = image, tag = tag)
+        host_config = self.create_host_config(host_guest_map = self.host_guest_map, privileged = True)
+        volumes = []
+        for _, g in self.host_guest_map:
+            volumes.append(g)
+        if update is True or not self.img_exists():
+            self.build_image(image)
+        ##Remove test container if any
+        self.remove_container(self.name, force=True)
+        if env is not None and env.has_key('OLT_CONFIG'):
+            self.olt = True
+            olt_conf_file = os.path.join(self.tester_base, 'olt_config.json')
+            olt_config = OltConfig(olt_conf_file)
+            self.port_map = olt_config.olt_port_map()
+        else:
+            self.olt = False
+            self.port_map = None
+        print('Starting test container %s, image %s, tag %s' %(self.name, self.image, self.tag))
+        self.start(rm = False, volumes = volumes, environment = env, 
+                   host_config = host_config, tty = True)
+
+    def execute_switch(self, cmd, shell = False):
+        if self.olt:
+            return os.system(cmd)
+        return self.execute(cmd, shell = shell)
+
+    def start_switch(self, bridge = 'ovsbr0', boot_delay = 2):
+        """Start OVS"""
+        ##Determine if OVS has to be started locally or not
+        s_file,s_sandbox = ('of-bridge-local.sh',self.tester_base) if self.olt else ('of-bridge.sh',self.sandbox_setup)
+        ovs_cmd = os.path.join(s_sandbox, '{0}'.format(s_file)) + ' {0}'.format(bridge)
+        if self.olt:
+            ovs_cmd += ' {0}'.format(self.ctlr_ip)
+            print('Starting OVS on the host')
+        else:
+            print('Starting OVS on test container %s' %self.name)
+        self.execute_switch(ovs_cmd)
+        status = 1
+        ## Wait for the LLDP flows to be added to the switch
+        tries = 0
+        while status != 0 and tries < 200:
+            cmd = 'sudo ovs-ofctl dump-flows {0} | grep \"type=0x8942\"'.format(bridge)
+            status = self.execute_switch(cmd, shell = True)
+            tries += 1
+            if tries % 10 == 0:
+                print('Waiting for test switch to be connected to ONOS controller ...')
+
+        if status != 0:
+            print('Test Switch not connected to ONOS container. '
+                  'Please remove the ONOS container and restart the test')
+            if self.rm:
+                self.kill()
+            sys.exit(1)
+
+        if boot_delay:
+            time.sleep(boot_delay)
+
+    def setup_intfs(self):
+        if not self.olt:
+            return 0
+        tester_intf_subnet = '192.168.100'
+        res = 0
+        port_num = 0
+        host_intf = self.port_map['host']
+        start_vlan = self.port_map['start_vlan']
+        for port in self.port_map['ports']:
+            guest_if = port
+            local_if = guest_if
+            guest_ip = '{0}.{1}/24'.format(tester_intf_subnet, str(port_num+1))
+            ##Use pipework to configure container interfaces on host/bridge interfaces
+            pipework_cmd = 'pipework {0} -i {1} -l {2} {3} {4}'.format(host_intf, guest_if, local_if, self.name, guest_ip)
+            if start_vlan != 0:
+                pipework_cmd += ' @{}'.format(str(start_vlan + port_num))
+                
+            res += os.system(pipework_cmd)
+            port_num += 1
+
+        return res
+
+    @classmethod
+    def get_name(cls):
+        cnt_name = '/{0}'.format(cls.basename)
+        cnt_name_len = len(cnt_name)
+        names = list(flatten(n['Names'] for n in cls.dckr.containers(all=True)))
+        test_names = filter(lambda n: n.startswith(cnt_name), names)
+        last_cnt_number = 0
+        if test_names:
+            last_cnt_name = reduce(lambda n1, n2: n1 if int(n1[cnt_name_len:]) > \
+                                       int(n2[cnt_name_len:]) else n2,
+                                   test_names)
+            last_cnt_number = int(last_cnt_name[cnt_name_len:])
+        test_cnt_name = cls.basename + str(last_cnt_number+1)
+        return test_cnt_name
+
+    @classmethod
+    def build_image(cls, image):
+        print('Building test container docker image %s' %image)
+        ovs_version = '2.5.0'
+        image_format = (ovs_version,)*4
+        dockerfile = '''
+FROM ubuntu:14.04
+MAINTAINER chetan@ciena.com
+RUN apt-get update 
+RUN apt-get -y install git python python-pip python-setuptools python-scapy tcpdump doxygen doxypy wget
+RUN easy_install nose
+RUN apt-get -y install openvswitch-common openvswitch-switch
+RUN mkdir -p /root/ovs
+WORKDIR /root
+RUN wget http://openvswitch.org/releases/openvswitch-{}.tar.gz -O /root/ovs/openvswitch-{}.tar.gz && \
+(cd /root/ovs && tar zxpvf openvswitch-{}.tar.gz && \
+ cd openvswitch-{} && \
+ ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --disable-ssl && make && make install)
+RUN service openvswitch-switch restart || /bin/true
+RUN apt-get -y install python-twisted python-sqlite sqlite3 python-pexpect telnet
+RUN pip install scapy-ssl_tls
+RUN pip install -U scapy
+RUN pip install monotonic
+RUN pip install configObj
+RUN pip install -U docker-py
+RUN pip install -U pyyaml
+RUN pip install -U nsenter
+RUN pip install -U pyroute2
+RUN pip install -U netaddr
+RUN apt-get -y install arping
+RUN mv /usr/sbin/tcpdump /sbin/
+RUN ln -sf /sbin/tcpdump /usr/sbin/tcpdump
+CMD ["/bin/bash"]
+'''.format(*image_format)
+        super(CordTester, cls).build_image(dockerfile, image)
+        print('Done building docker image %s' %image)
+
+    def run_tests(self, tests):
+        '''Run the list of tests'''
+        for t in tests:
+            test = t.split(':')[0]
+            if test == 'tls':
+                test_file = test + 'AuthTest.py'
+            else:
+                test_file = test + 'Test.py'
+
+            if t.find(':') >= 0:
+                test_case = test_file + ':' + t.split(':')[1]
+            else:
+                test_case = test_file
+            cmd = 'nosetests -v {0}/src/test/{1}/{2}'.format(self.sandbox, test, test_case)
+            status = self.execute(cmd, shell = True)
+            print('Test %s %s' %(test_case, 'Success' if status == 0 else 'Failure'))
+        print('Done running tests')
+        if self.rm:
+            print('Removing test container %s' %self.name)
+            self.kill(remove=True)
+
+    @classmethod
+    def list_tests(cls, tests):
+        print('Listing test cases')
+        for test in tests:
+            if test == 'tls':
+                test_file = test + 'AuthTest.py'
+            else:
+                test_file = test + 'Test.py'
+            cmd = 'nosetests -v --collect-only {0}/../{1}/{2}'.format(cls.tester_base, test, test_file)
+            os.system(cmd)
+
+##default onos/radius/test container images and names
+onos_image_default='onosproject/onos:latest'
+nose_image_default='cord-test/nose:latest'
+test_type_default='dhcp'
+onos_app_version = '1.0-SNAPSHOT'
+cord_tester_base = os.path.dirname(os.path.realpath(__file__))
+onos_app_file = os.path.abspath('{0}/../apps/ciena-cordigmp-'.format(cord_tester_base) + onos_app_version + '.oar')
+
+def runTest(args):
+    #Start the cord test tcp server
+    test_server = cord_test_server_start()
+    tests = args.test_type.split('-')
+    onos_cnt = {'tag':'latest'}
+    nose_cnt = {'image': 'cord-test/nose','tag': 'latest'}
+    radius_ip = None
+    quagga_ip = None
+    if args.cleanup:
+        cleanup_container = args.cleanup
+        if cleanup_container.find(':') < 0:
+            cleanup_container += ':latest'
+        print('Cleaning up containers %s' %cleanup_container)
+        Container.cleanup(cleanup_container)
+        sys.exit(0)
+
+    if args.list:
+        CordTester.list_tests(tests)
+        sys.exit(0)
+
+    #don't spawn onos if the user has specified external test controller with test interface config
+    if args.test_controller:
+        ips = args.test_controller.split('/')
+        onos_ip = ips[0]
+        if len(ips) > 1:
+            radius_ip = ips[1]
+        else:
+            radius_ip = None
+    else:
+        onos_cnt['image'] = args.onos.split(':')[0]
+        if args.onos.find(':') >= 0:
+            onos_cnt['tag'] = args.onos.split(':')[1]
+
+        onos = Onos(image = onos_cnt['image'], tag = onos_cnt['tag'], boot_delay = 60)
+        onos_ip = onos.ip()
+
+        ##Start Radius container if specified
+        if args.radius == True:
+            radius = Radius()
+            radius_ip = radius.ip()
+            print('Radius server running with IP %s' %radius_ip)
+        else:
+            radius_ip = None
+            
+    print('Onos IP %s, Test type %s' %(onos_ip, args.test_type))
+    print('Installing ONOS app %s' %args.app)
+    OnosCtrl.install_app(args.app, onos_ip = onos_ip)
+    
+    if args.quagga == True:
+        #Start quagga. Builds container if required
+        quagga = Quagga()
+        quagga_ip = quagga.ip()
+        
+    test_cnt_env = { 'ONOS_CONTROLLER_IP' : onos_ip,
+                     'ONOS_AAA_IP' : radius_ip if radius_ip is not None else '',
+                     'QUAGGA_IP': quagga_ip if quagga_ip is not None else '',
+                   }
+    if args.olt:
+        olt_conf_test_loc = os.path.join(CordTester.sandbox_setup, 'olt_config.json')
+        test_cnt_env['OLT_CONFIG'] = olt_conf_test_loc
+
+    test_cnt = CordTester(ctlr_ip = onos_ip, image = nose_cnt['image'], tag = nose_cnt['tag'],
+                          env = test_cnt_env,
+                          rm = False if args.keep else True,
+                          update = args.update)
+    if args.start_switch or not args.olt:
+        test_cnt.start_switch()
+    test_cnt.setup_intfs()
+    test_cnt.run_tests(tests)
+    cord_test_server_stop(test_server)
+
+if __name__ == '__main__':
+    parser = ArgumentParser(description='Cord Tester')
+    parser.add_argument('-t', '--test-type', default=test_type_default, type=str)
+    parser.add_argument('-o', '--onos', default=onos_image_default, type=str, help='ONOS container image')
+    parser.add_argument('-r', '--radius',action='store_true', help='Start Radius service')
+    parser.add_argument('-q', '--quagga',action='store_true',help='Provision quagga container for vrouter')
+    parser.add_argument('-a', '--app', default=onos_app_file, type=str, help='Cord ONOS app filename')
+    parser.add_argument('-p', '--olt', action='store_true', help='Use OLT config')
+    parser.add_argument('-l', '--list', action='store_true', help='List test cases')
+    parser.add_argument('-e', '--test-controller', default='', type=str, help='External test controller IP for ONOS and/or Radius server. '
+                        'Eg: 10.0.0.2/10.0.0.3 to specify the ONOS and Radius IPs to connect to')
+    parser.add_argument('-c', '--cleanup', default='', type=str, help='Cleanup test containers')
+    parser.add_argument('-k', '--keep', action='store_true', help='Keep test container after tests')
+    parser.add_argument('-s', '--start-switch', action='store_true', help='Start OVS when running under OLT config')
+    parser.add_argument('-u', '--update', action='store_true', help='Update test container image')
+    parser.set_defaults(func=runTest)
+    args = parser.parse_args()
+    args.func(args)
diff --git a/src/test/setup/cord-test.sh b/src/test/setup/cord-test.sh
new file mode 100755
index 0000000..f1a5004
--- /dev/null
+++ b/src/test/setup/cord-test.sh
@@ -0,0 +1,209 @@
+#!/usr/bin/env bash
+
+function show_help {
+    echo "Usage: ${0#*/} -h | this help -n <onos_ip> -O | use olt config | -r <radius_ip> -o <onos cnt image> -a < onos app file> -d <radius cnt image> -t <test type> -c | cleanup test containers -C <cleanup container list> -k | kill the test container -b <test cnt image> | build test container docker image"
+    exit 1
+}
+
+function cnt_ipaddr {
+    local image="${1}"
+    local cnt=`docker ps |grep "${image}" |awk '{print $1}'`
+    local ipaddr
+    ipaddr=`docker inspect -f '{{.NetworkSettings.IPAddress}}' $cnt`
+    echo $ipaddr
+}
+
+function onos_start {
+    local image="${1}"
+    local port_str=""
+    for p in 8181 8101 9876 6653 6633; do
+        port_str="$port_str -p $p:$p/tcp"
+    done
+    ONOS_APPS="drivers,openflow,proxyarp,aaa,igmp"
+    local cnt=`docker run -itd $port_str -e ONOS_APPS=${ONOS_APPS} $image /bin/bash`
+    local ipaddr
+    ipaddr=`docker inspect -f '{{.NetworkSettings.IPAddress}}' $cnt`
+    echo $ipaddr
+}
+
+test_type=dhcp
+onos_cnt_image=onosproject/onos
+radius_cnt_image=radius-server:dev
+onos_ip=
+radius_ip=
+OPTIND=1
+nose_cnt_image="onos:nosetest"
+cleanup=0
+kill_test_cnt=0
+build_cnt_image=
+cleanup_cnt_list=
+app_version=1.0-SNAPSHOT
+onos_app_file=$PWD/../apps/ciena-cordigmp-$app_version.oar
+olt_config=0
+
+while getopts "h?a:n:r:o:d:t:cC:kOb:" opt; do
+    case "$opt" in
+        h|\?)
+            show_help
+            exit 1
+            ;;
+        O)
+            olt_config=1
+            ;;
+        t)
+            test_type=$OPTARG
+            ;;
+        n)
+            onos_ip=$OPTARG
+            ;;
+        r)
+            radius_ip=$OPTARG
+            ;;
+        o)
+            onos_cnt_image=$OPTARG
+            ;;
+        d)
+            radius_cnt_image=$OPTARG
+            ;;
+        a)
+            onos_app_file=$OPTARG
+            ;;
+        c)
+            cleanup=1
+            ;;
+        C)
+            cleanup=1
+            cleanup_cnt_list=$OPTARG
+            ;;
+        k)
+            kill_test_cnt=1
+            ;;
+        b)
+            build_cnt_image=$OPTARG
+            ;;
+    esac
+done
+
+shift $(($OPTIND-1))
+
+if [ $# -gt 0 ]; then
+    echo "Invalid args"
+    show_help
+fi
+
+if [ $cleanup -eq 1 ]; then
+    if [ x"$cleanup_cnt_list" != "x" ]; then
+        IFS='-' read -r -a cleanup_list <<<"${cleanup_cnt_list}"
+        for container in "${cleanup_list[@]}"; do
+            cnt_id=`docker ps | grep "${container}" | awk '{print $1}'`
+            echo "Killing container $cnt_id"
+            docker kill $cnt_id
+        done
+        exit 0
+    fi
+    for container in `docker ps | grep "${nose_cnt_image}" | awk '{print $1}'`; do
+        echo "Killing test container $container"
+        docker kill $container
+    done
+    exit 0
+fi
+
+if [ x"$onos_ip" = "x" ]; then
+    ##First try fetching the existing ip for onos container
+    onos_ip=$(cnt_ipaddr $onos_cnt_image)
+    ##If we find no onos running, then spawn the container if we can
+    if [ x"$onos_ip" = "x" ]; then
+        ##If the container image is from onosproject, we can try starting it
+        if [[ "$onos_cnt_image" =~ "onosproject/" ]]; then
+            echo "Starting ONOS container $onos_cnt_image"
+            onos_ip=$(onos_start $onos_cnt_image)
+            echo "Waiting 60 seconds for ONOS to fully boot up"
+            sleep 60
+        fi
+    fi
+fi
+
+if [ x"$onos_ip" = "x" ]; then
+    echo "ONOS not running or container name is invalid"
+    exit 127
+fi
+
+if [ x"$radius_ip" = "x" ]; then
+    radius_ip=$(cnt_ipaddr $radius_cnt_image)
+fi
+
+echo "Onos IP $onos_ip, Radius IP $radius_ip, Test type $test_type"
+sed "s,%%CONTROLLER%%,$onos_ip,g" of-bridge-template.sh > $HOME/nose_exp/of-bridge.sh
+
+if [ x"$build_cnt_image" != "x" ]; then
+    echo "Building test container docker image $build_cnt_image"
+    (cd test_docker && docker build -t $build_cnt_image . )
+    sleep 2
+    echo "Done building docker image $build_cnt_image"
+    nose_cnt_image=$build_cnt_image
+fi
+
+function install_onos_app {
+    local app=$1
+    local onos_url="http://$onos_ip:8181/onos/v1/applications"
+    local curl="curl -sS --user karaf:karaf"
+    $curl -X POST -HContent-Type:application/octet-stream $onos_url?activate=true --data-binary @$app
+}
+
+echo "Installing and activating onos app $onos_app_file"
+
+install_onos_app $onos_app_file
+
+if [ $olt_config -eq 1 ]; then
+    olt_conf_loc="$PWD/olt_config.json"
+    olt_conf_test_loc="/root/test"${olt_conf_loc#$HOME\/nose_exp}
+    olt_env="OLT_CONFIG=$olt_conf_test_loc"
+    echo -e "\nTest running on OLT switch with olt env ${olt_env}"
+else
+    olt_env="OLT_CONFIG="
+    echo -e "\nTest running on OVS"
+fi
+echo "Starting test container $nose_cnt_image"
+test_cnt=`docker run -itd --privileged -v $HOME/nose_exp:/root/test -v /lib/modules:/lib/modules -e ONOS_CONTROLLER_IP=$onos_ip -e ONOS_AAA_IP=$radius_ip -e ${olt_env} $nose_cnt_image /bin/bash`
+echo "Setting up test container $test_cnt"
+docker exec $test_cnt pip install monotonic
+echo "Starting up the OVS switch on the test container $test_cnt"
+docker exec $test_cnt /root/test/of-bridge.sh br0
+status=0
+while [ $status -ne 0 ]; do
+    echo "Waiting for the switch to get connected to controller"
+    docker exec $test_cnt ovs-ofctl dump-flows br0  | grep "type=0x8942"
+    status=$?
+    sleep 1
+done
+sleep 5
+
+IFS='-' read -r -a tests <<<"${test_type}"
+for t in "${tests[@]}"; do
+    test_method="${t#*:}"
+    test="${t%%:*}"
+    case "$test" in
+        tls)
+            test_file="$test"AuthTest.py
+            ;;
+        *)
+            test_file="$test"Test.py
+            ;;
+    esac
+    if [ "$test_method" != "$t" ]; then
+        test_case="$test_file":"${test_method}"
+    else
+        test_case="$test_file"
+    fi
+    echo "Running test $test, test case $test_case"
+    docker exec $test_cnt nosetests -v /root/test/git/cord-tester/src/test/$test/"${test_case}"
+    echo "Test $t exited with status $?"
+done
+
+echo "Done running tests."
+
+if [ $kill_test_cnt -eq 1 ]; then
+    echo "Killing test container $test_cnt"
+    docker kill $test_cnt
+fi
+
diff --git a/src/test/setup/eval.sh b/src/test/setup/eval.sh
new file mode 100755
index 0000000..adccf10
--- /dev/null
+++ b/src/test/setup/eval.sh
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+cord_tester="$(dirname $0)/cord-test.py"
+if [ ! -f $cord_tester ]; then
+  cord_tester="$HOME/cord-tester/src/test/setup/cord-test.py"
+fi
+ONOS_IGMP="onosproject/onos:1.5"
+docker kill cord-onos || true
+docker kill cord-quagga || true
+echo "Running TLS authentication test"
+$cord_tester -r -t tls
+echo "Running DHCP request test"
+$cord_tester -q -t dhcp:dhcp_exchange.test_dhcp_1request
+docker kill cord-onos || true
+echo "Running IGMP join verify test"
+$cord_tester -q -o $ONOS_IGMP -t igmp:igmp_exchange.test_igmp_join_verify_traffic
+docker kill cord-onos || true
+echo "Running VROUTER test with 5 routes"
+$cord_tester -q -t vrouter:vrouter_exchange.test_vrouter_1
\ No newline at end of file
diff --git a/src/test/setup/of-bridge-local.sh b/src/test/setup/of-bridge-local.sh
new file mode 100755
index 0000000..114725d
--- /dev/null
+++ b/src/test/setup/of-bridge-local.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+bridge="$1"
+controller="$2"
+if [ x"$bridge" = "x" ]; then
+  bridge="ovsbr0"
+fi
+if [ x"$controller" = "x" ]; then
+  controller=$ONOS_CONTROLLER_IP
+fi
+service openvswitch-switch start
+echo "Configuring ovs bridge $bridge"
+ovs-vsctl del-br $bridge
+ovs-vsctl add-br $bridge
+my_ip=`ifconfig docker0 | grep "inet addr" | tr -s ' ' | cut -d":" -f2 |cut -d" " -f1`
+ovs-vsctl set-controller $bridge ptcp:6653:$my_ip tcp:$controller:6633
+ovs-vsctl set controller $bridge max_backoff=1000
+ovs-vsctl set bridge $bridge protocols=OpenFlow10,OpenFlow11,OpenFlow12,OpenFlow13
+ovs-vsctl show
+ovs-ofctl show $bridge
diff --git a/src/test/setup/of-bridge-template.sh b/src/test/setup/of-bridge-template.sh
new file mode 100755
index 0000000..1be9a4c
--- /dev/null
+++ b/src/test/setup/of-bridge-template.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+bridge="$1"
+controller="$2"
+if [ x"$bridge" = "x" ]; then
+  bridge="br0"
+fi
+if [ x"$controller" = "x" ]; then
+  controller="%%CONTROLLER%%"
+fi
+service openvswitch-switch restart
+num_ports=200
+ports=$(($num_ports-1))
+for vports in $(seq 0 2 $ports); do
+   echo "Deleting veth$vports"
+   ip link del veth$vports
+done
+for vports in $(seq 0 2 $ports); do
+  ip link add type veth
+  ifconfig veth$vports up
+  ifconfig veth$(($vports+1)) up
+done
+echo "Configuring ovs bridge $bridge"
+ovs-vsctl del-br $bridge
+ovs-vsctl add-br $bridge
+for i in $(seq 1 2 $ports); do
+  ovs-vsctl add-port $bridge veth$i
+done
+my_ip=`ifconfig eth0 | grep "inet addr" | tr -s ' ' | cut -d":" -f2 |cut -d" " -f1`
+ovs-vsctl set-controller $bridge ptcp:6653:$my_ip tcp:$controller:6633
+ovs-vsctl set controller $bridge max_backoff=1000
+ovs-vsctl set bridge $bridge protocols=OpenFlow10,OpenFlow11,OpenFlow12,OpenFlow13
+ovs-vsctl show
+ovs-ofctl show $bridge
diff --git a/src/test/setup/of-bridge.sh b/src/test/setup/of-bridge.sh
new file mode 100755
index 0000000..1375fb5
--- /dev/null
+++ b/src/test/setup/of-bridge.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+bridge="$1"
+controller="$2"
+if [ x"$bridge" = "x" ]; then
+  bridge="ovsbr0"
+fi
+if [ x"$controller" = "x" ]; then
+  controller=$ONOS_CONTROLLER_IP
+fi
+service openvswitch-switch restart
+num_ports=200
+ports=$(($num_ports-1))
+for vports in $(seq 0 2 $ports); do
+   echo "Deleting veth$vports"
+   ip link del veth$vports
+done
+for vports in $(seq 0 2 $ports); do
+  ip link add type veth
+  ifconfig veth$vports up
+  ifconfig veth$(($vports+1)) up
+done
+echo "Configuring ovs bridge $bridge"
+ovs-vsctl del-br $bridge
+ovs-vsctl add-br $bridge
+#ovs-vsctl set bridge $bridge other-config:hwaddr=00:11:22:33:44:55
+for i in $(seq 1 2 $ports); do
+  ovs-vsctl add-port $bridge veth$i
+done
+my_ip=`ifconfig eth0 | grep "inet addr" | tr -s ' ' | cut -d":" -f2 |cut -d" " -f1`
+ovs-vsctl set-controller $bridge ptcp:6653:$my_ip tcp:$controller:6633
+ovs-vsctl set controller $bridge max_backoff=1000
+ovs-vsctl set bridge $bridge protocols=OpenFlow10,OpenFlow11,OpenFlow12,OpenFlow13
+ovs-vsctl show
+ovs-ofctl show $bridge
diff --git a/src/test/setup/olt_config.json b/src/test/setup/olt_config.json
new file mode 100644
index 0000000..2840af6
--- /dev/null
+++ b/src/test/setup/olt_config.json
@@ -0,0 +1,2 @@
+{ "olt" : false , "port_map" : { "ports" : [ "veth0", "veth2" ], "tx" : "veth2", "rx" : "veth0", "host" : "enp0s8", "start_vlan" : 1000 }, "uplink" : 2, "vlan" : 0 }
+  
diff --git a/src/test/setup/onos-config/network-cfg-sample.json b/src/test/setup/onos-config/network-cfg-sample.json
new file mode 100644
index 0000000..b858540
--- /dev/null
+++ b/src/test/setup/onos-config/network-cfg-sample.json
@@ -0,0 +1 @@
+{"apps": {"org.onosproject.router": {"router": {"ospfEnabled": true, "interfaces": ["b1-1", "b1-2", "b1-3", "b1-4", "b1-5", "b1-6", "b1-7", "b1-8", "b1-9", "b1-10", "b1-11", "b1-12", "b1-13", "b1-14", "b1-15", "b1-16", "b1-17", "b1-18", "b1-19", "b1-20", "b1-21", "b1-22", "b1-23", "b1-24", "b1-25", "b1-26", "b1-27", "b1-28", "b1-29", "b1-30", "b1-31", "b1-32", "b1-33", "b1-34", "b1-35", "b1-36", "b1-37", "b1-38", "b1-39", "b1-40", "b1-41", "b1-42", "b1-43", "b1-44", "b1-45", "b1-46", "b1-47", "b1-48", "b1-49", "b1-50", "b1-51", "b1-52", "b1-53", "b1-54", "b1-55", "b1-56", "b1-57", "b1-58", "b1-59", "b1-60", "b1-61", "b1-62", "b1-63", "b1-64", "b1-65", "b1-66", "b1-67", "b1-68", "b1-69", "b1-70", "b1-71", "b1-72", "b1-73", "b1-74", "b1-75", "b1-76", "b1-77", "b1-78", "b1-79", "b1-80", "b1-81", "b1-82", "b1-83", "b1-84", "b1-85", "b1-86", "b1-87", "b1-88", "b1-89", "b1-90", "b1-91", "b1-92", "b1-93", "b1-94", "b1-95", "b1-96", "b1-97", "b1-98", "b1-99"], "controlPlaneConnectPoint": "of:000002ddebbeb549/100"}}}, "ports": {"of:000002ddebbeb549/19": {"interfaces": [{"ips": ["11.0.18.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-19"}]}, "of:000002ddebbeb549/18": {"interfaces": [{"ips": ["11.0.17.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-18"}]}, "of:000002ddebbeb549/11": {"interfaces": [{"ips": ["11.0.10.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-11"}]}, "of:000002ddebbeb549/10": {"interfaces": [{"ips": ["11.0.9.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-10"}]}, "of:000002ddebbeb549/13": {"interfaces": [{"ips": ["11.0.12.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-13"}]}, "of:000002ddebbeb549/12": {"interfaces": [{"ips": ["11.0.11.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-12"}]}, "of:000002ddebbeb549/15": {"interfaces": [{"ips": ["11.0.14.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-15"}]}, "of:000002ddebbeb549/14": {"interfaces": [{"ips": ["11.0.13.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-14"}]}, "of:000002ddebbeb549/17": {"interfaces": [{"ips": ["11.0.16.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-17"}]}, "of:000002ddebbeb549/16": {"interfaces": [{"ips": ["11.0.15.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-16"}]}, "of:000002ddebbeb549/82": {"interfaces": [{"ips": ["11.0.81.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-82"}]}, "of:000002ddebbeb549/83": {"interfaces": [{"ips": ["11.0.82.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-83"}]}, "of:000002ddebbeb549/80": {"interfaces": [{"ips": ["11.0.79.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-80"}]}, "of:000002ddebbeb549/81": {"interfaces": [{"ips": ["11.0.80.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-81"}]}, "of:000002ddebbeb549/86": {"interfaces": [{"ips": ["11.0.85.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-86"}]}, "of:000002ddebbeb549/87": {"interfaces": [{"ips": ["11.0.86.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-87"}]}, "of:000002ddebbeb549/84": {"interfaces": [{"ips": ["11.0.83.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-84"}]}, "of:000002ddebbeb549/85": {"interfaces": [{"ips": ["11.0.84.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-85"}]}, "of:000002ddebbeb549/88": {"interfaces": [{"ips": ["11.0.87.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-88"}]}, "of:000002ddebbeb549/89": {"interfaces": [{"ips": ["11.0.88.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-89"}]}, "of:000002ddebbeb549/1": {"interfaces": [{"ips": ["11.0.0.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-1"}]}, "of:000002ddebbeb549/3": {"interfaces": [{"ips": ["11.0.2.2/24"], "mac": 
"00:00:00:00:00:01", "name": "b1-3"}]}, "of:000002ddebbeb549/2": {"interfaces": [{"ips": ["11.0.1.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-2"}]}, "of:000002ddebbeb549/5": {"interfaces": [{"ips": ["11.0.4.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-5"}]}, "of:000002ddebbeb549/4": {"interfaces": [{"ips": ["11.0.3.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-4"}]}, "of:000002ddebbeb549/7": {"interfaces": [{"ips": ["11.0.6.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-7"}]}, "of:000002ddebbeb549/6": {"interfaces": [{"ips": ["11.0.5.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-6"}]}, "of:000002ddebbeb549/9": {"interfaces": [{"ips": ["11.0.8.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-9"}]}, "of:000002ddebbeb549/8": {"interfaces": [{"ips": ["11.0.7.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-8"}]}, "of:000002ddebbeb549/91": {"interfaces": [{"ips": ["11.0.90.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-91"}]}, "of:000002ddebbeb549/90": {"interfaces": [{"ips": ["11.0.89.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-90"}]}, "of:000002ddebbeb549/93": {"interfaces": [{"ips": ["11.0.92.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-93"}]}, "of:000002ddebbeb549/92": {"interfaces": [{"ips": ["11.0.91.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-92"}]}, "of:000002ddebbeb549/95": {"interfaces": [{"ips": ["11.0.94.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-95"}]}, "of:000002ddebbeb549/94": {"interfaces": [{"ips": ["11.0.93.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-94"}]}, "of:000002ddebbeb549/97": {"interfaces": [{"ips": ["11.0.96.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-97"}]}, "of:000002ddebbeb549/96": {"interfaces": [{"ips": ["11.0.95.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-96"}]}, "of:000002ddebbeb549/99": {"interfaces": [{"ips": ["11.0.98.2/24", "11.0.99.2/24", "11.0.100.2/24", "11.0.101.2/24", "11.0.102.2/24", "11.0.103.2/24", "11.0.104.2/24", "11.0.105.2/24", "11.0.106.2/24", "11.0.107.2/24", "11.0.108.2/24", "11.0.109.2/24", "11.0.110.2/24", "11.0.111.2/24", "11.0.112.2/24", "11.0.113.2/24", "11.0.114.2/24", "11.0.115.2/24", "11.0.116.2/24", "11.0.117.2/24", "11.0.118.2/24", "11.0.119.2/24", "11.0.120.2/24", "11.0.121.2/24", "11.0.122.2/24", "11.0.123.2/24", "11.0.124.2/24", "11.0.125.2/24", "11.0.126.2/24", "11.0.127.2/24", "11.0.128.2/24", "11.0.129.2/24", "11.0.130.2/24", "11.0.131.2/24", "11.0.132.2/24", "11.0.133.2/24", "11.0.134.2/24", "11.0.135.2/24", "11.0.136.2/24", "11.0.137.2/24", "11.0.138.2/24", "11.0.139.2/24", "11.0.140.2/24", "11.0.141.2/24", "11.0.142.2/24", "11.0.143.2/24", "11.0.144.2/24", "11.0.145.2/24", "11.0.146.2/24", "11.0.147.2/24", "11.0.148.2/24", "11.0.149.2/24", "11.0.150.2/24", "11.0.151.2/24", "11.0.152.2/24", "11.0.153.2/24", "11.0.154.2/24", "11.0.155.2/24", "11.0.156.2/24", "11.0.157.2/24", "11.0.158.2/24", "11.0.159.2/24", "11.0.160.2/24", "11.0.161.2/24", "11.0.162.2/24", "11.0.163.2/24", "11.0.164.2/24", "11.0.165.2/24", "11.0.166.2/24", "11.0.167.2/24", "11.0.168.2/24", "11.0.169.2/24", "11.0.170.2/24", "11.0.171.2/24", "11.0.172.2/24", "11.0.173.2/24", "11.0.174.2/24", "11.0.175.2/24", "11.0.176.2/24", "11.0.177.2/24", "11.0.178.2/24", "11.0.179.2/24", "11.0.180.2/24", "11.0.181.2/24", "11.0.182.2/24", "11.0.183.2/24", "11.0.184.2/24", "11.0.185.2/24", "11.0.186.2/24", "11.0.187.2/24", "11.0.188.2/24", "11.0.189.2/24", "11.0.190.2/24", "11.0.191.2/24", "11.0.192.2/24", "11.0.193.2/24", "11.0.194.2/24", "11.0.195.2/24", "11.0.196.2/24", "11.0.197.2/24", "11.0.198.2/24", "11.0.199.2/24", 
"11.0.200.2/24", "11.0.201.2/24", "11.0.202.2/24", "11.0.203.2/24", "11.0.204.2/24", "11.0.205.2/24", "11.0.206.2/24", "11.0.207.2/24", "11.0.208.2/24", "11.0.209.2/24", "11.0.210.2/24", "11.0.211.2/24", "11.0.212.2/24", "11.0.213.2/24", "11.0.214.2/24", "11.0.215.2/24", "11.0.216.2/24", "11.0.217.2/24", "11.0.218.2/24", "11.0.219.2/24", "11.0.220.2/24", "11.0.221.2/24", "11.0.222.2/24", "11.0.223.2/24", "11.0.224.2/24", "11.0.225.2/24", "11.0.226.2/24", "11.0.227.2/24", "11.0.228.2/24", "11.0.229.2/24", "11.0.230.2/24", "11.0.231.2/24", "11.0.232.2/24", "11.0.233.2/24", "11.0.234.2/24", "11.0.235.2/24", "11.0.236.2/24", "11.0.237.2/24", "11.0.238.2/24", "11.0.239.2/24", "11.0.240.2/24", "11.0.241.2/24", "11.0.242.2/24", "11.0.243.2/24", "11.0.244.2/24", "11.0.245.2/24", "11.0.246.2/24", "11.0.247.2/24", "11.0.248.2/24", "11.0.249.2/24", "11.0.250.2/24", "11.0.251.2/24", "11.0.252.2/24", "11.0.253.2/24", "11.0.254.2/24", "11.0.255.2/24", "11.1.0.2/24", "11.1.1.2/24", "11.1.2.2/24", "11.1.3.2/24", "11.1.4.2/24", "11.1.5.2/24", "11.1.6.2/24", "11.1.7.2/24", "11.1.8.2/24", "11.1.9.2/24", "11.1.10.2/24", "11.1.11.2/24", "11.1.12.2/24", "11.1.13.2/24", "11.1.14.2/24", "11.1.15.2/24", "11.1.16.2/24", "11.1.17.2/24", "11.1.18.2/24", "11.1.19.2/24", "11.1.20.2/24", "11.1.21.2/24", "11.1.22.2/24", "11.1.23.2/24", "11.1.24.2/24", "11.1.25.2/24", "11.1.26.2/24", "11.1.27.2/24", "11.1.28.2/24", "11.1.29.2/24", "11.1.30.2/24", "11.1.31.2/24", "11.1.32.2/24", "11.1.33.2/24", "11.1.34.2/24", "11.1.35.2/24", "11.1.36.2/24", "11.1.37.2/24", "11.1.38.2/24", "11.1.39.2/24", "11.1.40.2/24", "11.1.41.2/24", "11.1.42.2/24", "11.1.43.2/24", "11.1.44.2/24", "11.1.45.2/24", "11.1.46.2/24", "11.1.47.2/24", "11.1.48.2/24", "11.1.49.2/24", "11.1.50.2/24", "11.1.51.2/24", "11.1.52.2/24", "11.1.53.2/24", "11.1.54.2/24", "11.1.55.2/24", "11.1.56.2/24", "11.1.57.2/24", "11.1.58.2/24", "11.1.59.2/24", "11.1.60.2/24", "11.1.61.2/24", "11.1.62.2/24", "11.1.63.2/24", "11.1.64.2/24", "11.1.65.2/24", "11.1.66.2/24", "11.1.67.2/24", "11.1.68.2/24", "11.1.69.2/24", "11.1.70.2/24", "11.1.71.2/24", "11.1.72.2/24", "11.1.73.2/24", "11.1.74.2/24", "11.1.75.2/24", "11.1.76.2/24", "11.1.77.2/24", "11.1.78.2/24", "11.1.79.2/24", "11.1.80.2/24", "11.1.81.2/24", "11.1.82.2/24", "11.1.83.2/24", "11.1.84.2/24", "11.1.85.2/24", "11.1.86.2/24", "11.1.87.2/24", "11.1.88.2/24", "11.1.89.2/24", "11.1.90.2/24", "11.1.91.2/24", "11.1.92.2/24", "11.1.93.2/24", "11.1.94.2/24", "11.1.95.2/24", "11.1.96.2/24", "11.1.97.2/24", "11.1.98.2/24", "11.1.99.2/24", "11.1.100.2/24", "11.1.101.2/24", "11.1.102.2/24", "11.1.103.2/24", "11.1.104.2/24", "11.1.105.2/24", "11.1.106.2/24", "11.1.107.2/24", "11.1.108.2/24", "11.1.109.2/24", "11.1.110.2/24", "11.1.111.2/24", "11.1.112.2/24", "11.1.113.2/24", "11.1.114.2/24", "11.1.115.2/24", "11.1.116.2/24", "11.1.117.2/24", "11.1.118.2/24", "11.1.119.2/24", "11.1.120.2/24", "11.1.121.2/24", "11.1.122.2/24", "11.1.123.2/24", "11.1.124.2/24", "11.1.125.2/24", "11.1.126.2/24", "11.1.127.2/24", "11.1.128.2/24", "11.1.129.2/24", "11.1.130.2/24", "11.1.131.2/24", "11.1.132.2/24", "11.1.133.2/24", "11.1.134.2/24", "11.1.135.2/24", "11.1.136.2/24", "11.1.137.2/24", "11.1.138.2/24", "11.1.139.2/24", "11.1.140.2/24", "11.1.141.2/24", "11.1.142.2/24", "11.1.143.2/24", "11.1.144.2/24", "11.1.145.2/24", "11.1.146.2/24", "11.1.147.2/24", "11.1.148.2/24", "11.1.149.2/24", "11.1.150.2/24", "11.1.151.2/24", "11.1.152.2/24", "11.1.153.2/24", "11.1.154.2/24", "11.1.155.2/24", "11.1.156.2/24", "11.1.157.2/24", "11.1.158.2/24", 
"11.1.159.2/24", "11.1.160.2/24", "11.1.161.2/24", "11.1.162.2/24", "11.1.163.2/24", "11.1.164.2/24", "11.1.165.2/24", "11.1.166.2/24", "11.1.167.2/24", "11.1.168.2/24", "11.1.169.2/24", "11.1.170.2/24", "11.1.171.2/24", "11.1.172.2/24", "11.1.173.2/24", "11.1.174.2/24", "11.1.175.2/24", "11.1.176.2/24", "11.1.177.2/24", "11.1.178.2/24", "11.1.179.2/24", "11.1.180.2/24", "11.1.181.2/24", "11.1.182.2/24", "11.1.183.2/24", "11.1.184.2/24", "11.1.185.2/24", "11.1.186.2/24", "11.1.187.2/24", "11.1.188.2/24", "11.1.189.2/24", "11.1.190.2/24", "11.1.191.2/24", "11.1.192.2/24", "11.1.193.2/24", "11.1.194.2/24", "11.1.195.2/24", "11.1.196.2/24", "11.1.197.2/24", "11.1.198.2/24", "11.1.199.2/24", "11.1.200.2/24", "11.1.201.2/24", "11.1.202.2/24", "11.1.203.2/24", "11.1.204.2/24", "11.1.205.2/24", "11.1.206.2/24", "11.1.207.2/24", "11.1.208.2/24", "11.1.209.2/24", "11.1.210.2/24", "11.1.211.2/24", "11.1.212.2/24", "11.1.213.2/24", "11.1.214.2/24", "11.1.215.2/24", "11.1.216.2/24", "11.1.217.2/24", "11.1.218.2/24", "11.1.219.2/24", "11.1.220.2/24", "11.1.221.2/24", "11.1.222.2/24", "11.1.223.2/24", "11.1.224.2/24", "11.1.225.2/24", "11.1.226.2/24", "11.1.227.2/24", "11.1.228.2/24", "11.1.229.2/24", "11.1.230.2/24", "11.1.231.2/24", "11.1.232.2/24", "11.1.233.2/24", "11.1.234.2/24", "11.1.235.2/24", "11.1.236.2/24", "11.1.237.2/24", "11.1.238.2/24", "11.1.239.2/24", "11.1.240.2/24", "11.1.241.2/24", "11.1.242.2/24", "11.1.243.2/24", "11.1.244.2/24", "11.1.245.2/24", "11.1.246.2/24", "11.1.247.2/24", "11.1.248.2/24", "11.1.249.2/24", "11.1.250.2/24", "11.1.251.2/24", "11.1.252.2/24", "11.1.253.2/24", "11.1.254.2/24", "11.1.255.2/24", "11.2.0.2/24", "11.2.1.2/24", "11.2.2.2/24", "11.2.3.2/24", "11.2.4.2/24", "11.2.5.2/24", "11.2.6.2/24", "11.2.7.2/24", "11.2.8.2/24", "11.2.9.2/24", "11.2.10.2/24", "11.2.11.2/24", "11.2.12.2/24", "11.2.13.2/24", "11.2.14.2/24", "11.2.15.2/24", "11.2.16.2/24", "11.2.17.2/24", "11.2.18.2/24", "11.2.19.2/24", "11.2.20.2/24", "11.2.21.2/24", "11.2.22.2/24", "11.2.23.2/24", "11.2.24.2/24", "11.2.25.2/24", "11.2.26.2/24", "11.2.27.2/24", "11.2.28.2/24", "11.2.29.2/24", "11.2.30.2/24", "11.2.31.2/24", "11.2.32.2/24", "11.2.33.2/24", "11.2.34.2/24", "11.2.35.2/24", "11.2.36.2/24", "11.2.37.2/24", "11.2.38.2/24", "11.2.39.2/24", "11.2.40.2/24", "11.2.41.2/24", "11.2.42.2/24", "11.2.43.2/24", "11.2.44.2/24", "11.2.45.2/24", "11.2.46.2/24", "11.2.47.2/24", "11.2.48.2/24", "11.2.49.2/24", "11.2.50.2/24", "11.2.51.2/24", "11.2.52.2/24", "11.2.53.2/24", "11.2.54.2/24", "11.2.55.2/24", "11.2.56.2/24", "11.2.57.2/24", "11.2.58.2/24", "11.2.59.2/24", "11.2.60.2/24", "11.2.61.2/24", "11.2.62.2/24", "11.2.63.2/24", "11.2.64.2/24", "11.2.65.2/24", "11.2.66.2/24", "11.2.67.2/24", "11.2.68.2/24", "11.2.69.2/24", "11.2.70.2/24", "11.2.71.2/24", "11.2.72.2/24", "11.2.73.2/24", "11.2.74.2/24", "11.2.75.2/24", "11.2.76.2/24", "11.2.77.2/24", "11.2.78.2/24", "11.2.79.2/24", "11.2.80.2/24", "11.2.81.2/24", "11.2.82.2/24", "11.2.83.2/24", "11.2.84.2/24", "11.2.85.2/24", "11.2.86.2/24", "11.2.87.2/24", "11.2.88.2/24", "11.2.89.2/24", "11.2.90.2/24", "11.2.91.2/24", "11.2.92.2/24", "11.2.93.2/24", "11.2.94.2/24", "11.2.95.2/24", "11.2.96.2/24", "11.2.97.2/24", "11.2.98.2/24", "11.2.99.2/24", "11.2.100.2/24", "11.2.101.2/24", "11.2.102.2/24", "11.2.103.2/24", "11.2.104.2/24", "11.2.105.2/24", "11.2.106.2/24", "11.2.107.2/24", "11.2.108.2/24", "11.2.109.2/24", "11.2.110.2/24", "11.2.111.2/24", "11.2.112.2/24", "11.2.113.2/24", "11.2.114.2/24", "11.2.115.2/24", "11.2.116.2/24", "11.2.117.2/24", 
"11.2.118.2/24", "11.2.119.2/24", "11.2.120.2/24", "11.2.121.2/24", "11.2.122.2/24", "11.2.123.2/24", "11.2.124.2/24", "11.2.125.2/24", "11.2.126.2/24", "11.2.127.2/24", "11.2.128.2/24", "11.2.129.2/24", "11.2.130.2/24", "11.2.131.2/24", "11.2.132.2/24", "11.2.133.2/24", "11.2.134.2/24", "11.2.135.2/24", "11.2.136.2/24", "11.2.137.2/24", "11.2.138.2/24", "11.2.139.2/24", "11.2.140.2/24", "11.2.141.2/24", "11.2.142.2/24", "11.2.143.2/24", "11.2.144.2/24", "11.2.145.2/24", "11.2.146.2/24", "11.2.147.2/24", "11.2.148.2/24", "11.2.149.2/24", "11.2.150.2/24", "11.2.151.2/24", "11.2.152.2/24", "11.2.153.2/24", "11.2.154.2/24", "11.2.155.2/24", "11.2.156.2/24", "11.2.157.2/24", "11.2.158.2/24", "11.2.159.2/24", "11.2.160.2/24", "11.2.161.2/24", "11.2.162.2/24", "11.2.163.2/24", "11.2.164.2/24", "11.2.165.2/24", "11.2.166.2/24", "11.2.167.2/24", "11.2.168.2/24", "11.2.169.2/24", "11.2.170.2/24", "11.2.171.2/24", "11.2.172.2/24", "11.2.173.2/24", "11.2.174.2/24", "11.2.175.2/24", "11.2.176.2/24", "11.2.177.2/24", "11.2.178.2/24", "11.2.179.2/24", "11.2.180.2/24", "11.2.181.2/24", "11.2.182.2/24", "11.2.183.2/24", "11.2.184.2/24", "11.2.185.2/24", "11.2.186.2/24", "11.2.187.2/24", "11.2.188.2/24", "11.2.189.2/24", "11.2.190.2/24", "11.2.191.2/24", "11.2.192.2/24", "11.2.193.2/24", "11.2.194.2/24", "11.2.195.2/24", "11.2.196.2/24", "11.2.197.2/24", "11.2.198.2/24", "11.2.199.2/24", "11.2.200.2/24", "11.2.201.2/24", "11.2.202.2/24", "11.2.203.2/24", "11.2.204.2/24", "11.2.205.2/24", "11.2.206.2/24", "11.2.207.2/24", "11.2.208.2/24", "11.2.209.2/24", "11.2.210.2/24", "11.2.211.2/24", "11.2.212.2/24", "11.2.213.2/24", "11.2.214.2/24", "11.2.215.2/24", "11.2.216.2/24", "11.2.217.2/24", "11.2.218.2/24", "11.2.219.2/24", "11.2.220.2/24", "11.2.221.2/24", "11.2.222.2/24", "11.2.223.2/24", "11.2.224.2/24", "11.2.225.2/24", "11.2.226.2/24", "11.2.227.2/24", "11.2.228.2/24", "11.2.229.2/24", "11.2.230.2/24", "11.2.231.2/24", "11.2.232.2/24", "11.2.233.2/24", "11.2.234.2/24", "11.2.235.2/24", "11.2.236.2/24", "11.2.237.2/24", "11.2.238.2/24", "11.2.239.2/24", "11.2.240.2/24", "11.2.241.2/24", "11.2.242.2/24", "11.2.243.2/24", "11.2.244.2/24", "11.2.245.2/24", "11.2.246.2/24", "11.2.247.2/24", "11.2.248.2/24", "11.2.249.2/24", "11.2.250.2/24", "11.2.251.2/24", "11.2.252.2/24", "11.2.253.2/24", "11.2.254.2/24", "11.2.255.2/24", "11.3.0.2/24", "11.3.1.2/24", "11.3.2.2/24", "11.3.3.2/24", "11.3.4.2/24", "11.3.5.2/24", "11.3.6.2/24", "11.3.7.2/24", "11.3.8.2/24", "11.3.9.2/24", "11.3.10.2/24", "11.3.11.2/24", "11.3.12.2/24", "11.3.13.2/24", "11.3.14.2/24", "11.3.15.2/24", "11.3.16.2/24", "11.3.17.2/24", "11.3.18.2/24", "11.3.19.2/24", "11.3.20.2/24", "11.3.21.2/24", "11.3.22.2/24", "11.3.23.2/24", "11.3.24.2/24", "11.3.25.2/24", "11.3.26.2/24", "11.3.27.2/24", "11.3.28.2/24", "11.3.29.2/24", "11.3.30.2/24", "11.3.31.2/24", "11.3.32.2/24", "11.3.33.2/24", "11.3.34.2/24", "11.3.35.2/24", "11.3.36.2/24", "11.3.37.2/24", "11.3.38.2/24", "11.3.39.2/24", "11.3.40.2/24", "11.3.41.2/24", "11.3.42.2/24", "11.3.43.2/24", "11.3.44.2/24", "11.3.45.2/24", "11.3.46.2/24", "11.3.47.2/24", "11.3.48.2/24", "11.3.49.2/24", "11.3.50.2/24", "11.3.51.2/24", "11.3.52.2/24", "11.3.53.2/24", "11.3.54.2/24", "11.3.55.2/24", "11.3.56.2/24", "11.3.57.2/24", "11.3.58.2/24", "11.3.59.2/24", "11.3.60.2/24", "11.3.61.2/24", "11.3.62.2/24", "11.3.63.2/24", "11.3.64.2/24", "11.3.65.2/24", "11.3.66.2/24", "11.3.67.2/24", "11.3.68.2/24", "11.3.69.2/24", "11.3.70.2/24", "11.3.71.2/24", "11.3.72.2/24", "11.3.73.2/24", "11.3.74.2/24", "11.3.75.2/24", 
"11.3.76.2/24", "11.3.77.2/24", "11.3.78.2/24", "11.3.79.2/24", "11.3.80.2/24", "11.3.81.2/24", "11.3.82.2/24", "11.3.83.2/24", "11.3.84.2/24", "11.3.85.2/24", "11.3.86.2/24", "11.3.87.2/24", "11.3.88.2/24", "11.3.89.2/24", "11.3.90.2/24", "11.3.91.2/24", "11.3.92.2/24", "11.3.93.2/24", "11.3.94.2/24", "11.3.95.2/24", "11.3.96.2/24", "11.3.97.2/24", "11.3.98.2/24", "11.3.99.2/24", "11.3.100.2/24", "11.3.101.2/24", "11.3.102.2/24", "11.3.103.2/24", "11.3.104.2/24", "11.3.105.2/24", "11.3.106.2/24", "11.3.107.2/24", "11.3.108.2/24", "11.3.109.2/24", "11.3.110.2/24", "11.3.111.2/24", "11.3.112.2/24", "11.3.113.2/24", "11.3.114.2/24", "11.3.115.2/24", "11.3.116.2/24", "11.3.117.2/24", "11.3.118.2/24", "11.3.119.2/24", "11.3.120.2/24", "11.3.121.2/24", "11.3.122.2/24", "11.3.123.2/24", "11.3.124.2/24", "11.3.125.2/24", "11.3.126.2/24", "11.3.127.2/24", "11.3.128.2/24", "11.3.129.2/24", "11.3.130.2/24", "11.3.131.2/24", "11.3.132.2/24", "11.3.133.2/24", "11.3.134.2/24", "11.3.135.2/24", "11.3.136.2/24", "11.3.137.2/24", "11.3.138.2/24", "11.3.139.2/24", "11.3.140.2/24", "11.3.141.2/24", "11.3.142.2/24", "11.3.143.2/24", "11.3.144.2/24", "11.3.145.2/24", "11.3.146.2/24", "11.3.147.2/24", "11.3.148.2/24", "11.3.149.2/24", "11.3.150.2/24", "11.3.151.2/24", "11.3.152.2/24", "11.3.153.2/24", "11.3.154.2/24", "11.3.155.2/24", "11.3.156.2/24", "11.3.157.2/24", "11.3.158.2/24", "11.3.159.2/24", "11.3.160.2/24", "11.3.161.2/24", "11.3.162.2/24", "11.3.163.2/24", "11.3.164.2/24", "11.3.165.2/24", "11.3.166.2/24", "11.3.167.2/24", "11.3.168.2/24", "11.3.169.2/24", "11.3.170.2/24", "11.3.171.2/24", "11.3.172.2/24", "11.3.173.2/24", "11.3.174.2/24", "11.3.175.2/24", "11.3.176.2/24", "11.3.177.2/24", "11.3.178.2/24", "11.3.179.2/24", "11.3.180.2/24", "11.3.181.2/24", "11.3.182.2/24", "11.3.183.2/24", "11.3.184.2/24", "11.3.185.2/24", "11.3.186.2/24", "11.3.187.2/24", "11.3.188.2/24", "11.3.189.2/24", "11.3.190.2/24", "11.3.191.2/24", "11.3.192.2/24", "11.3.193.2/24", "11.3.194.2/24", "11.3.195.2/24", "11.3.196.2/24", "11.3.197.2/24", "11.3.198.2/24", "11.3.199.2/24", "11.3.200.2/24", "11.3.201.2/24", "11.3.202.2/24", "11.3.203.2/24", "11.3.204.2/24", "11.3.205.2/24", "11.3.206.2/24", "11.3.207.2/24", "11.3.208.2/24", "11.3.209.2/24", "11.3.210.2/24", "11.3.211.2/24", "11.3.212.2/24", "11.3.213.2/24", "11.3.214.2/24", "11.3.215.2/24", "11.3.216.2/24", "11.3.217.2/24", "11.3.218.2/24", "11.3.219.2/24", "11.3.220.2/24", "11.3.221.2/24", "11.3.222.2/24", "11.3.223.2/24", "11.3.224.2/24", "11.3.225.2/24", "11.3.226.2/24", "11.3.227.2/24", "11.3.228.2/24", "11.3.229.2/24", "11.3.230.2/24", "11.3.231.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-99"}]}, "of:000002ddebbeb549/98": {"interfaces": [{"ips": ["11.0.97.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-98"}]}, "of:000002ddebbeb549/24": {"interfaces": [{"ips": ["11.0.23.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-24"}]}, "of:000002ddebbeb549/25": {"interfaces": [{"ips": ["11.0.24.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-25"}]}, "of:000002ddebbeb549/26": {"interfaces": [{"ips": ["11.0.25.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-26"}]}, "of:000002ddebbeb549/27": {"interfaces": [{"ips": ["11.0.26.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-27"}]}, "of:000002ddebbeb549/20": {"interfaces": [{"ips": ["11.0.19.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-20"}]}, "of:000002ddebbeb549/21": {"interfaces": [{"ips": ["11.0.20.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-21"}]}, "of:000002ddebbeb549/22": {"interfaces": [{"ips": 
["11.0.21.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-22"}]}, "of:000002ddebbeb549/23": {"interfaces": [{"ips": ["11.0.22.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-23"}]}, "of:000002ddebbeb549/28": {"interfaces": [{"ips": ["11.0.27.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-28"}]}, "of:000002ddebbeb549/29": {"interfaces": [{"ips": ["11.0.28.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-29"}]}, "of:000002ddebbeb549/46": {"interfaces": [{"ips": ["11.0.45.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-46"}]}, "of:000002ddebbeb549/47": {"interfaces": [{"ips": ["11.0.46.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-47"}]}, "of:000002ddebbeb549/44": {"interfaces": [{"ips": ["11.0.43.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-44"}]}, "of:000002ddebbeb549/45": {"interfaces": [{"ips": ["11.0.44.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-45"}]}, "of:000002ddebbeb549/42": {"interfaces": [{"ips": ["11.0.41.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-42"}]}, "of:000002ddebbeb549/43": {"interfaces": [{"ips": ["11.0.42.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-43"}]}, "of:000002ddebbeb549/40": {"interfaces": [{"ips": ["11.0.39.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-40"}]}, "of:000002ddebbeb549/41": {"interfaces": [{"ips": ["11.0.40.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-41"}]}, "of:000002ddebbeb549/48": {"interfaces": [{"ips": ["11.0.47.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-48"}]}, "of:000002ddebbeb549/49": {"interfaces": [{"ips": ["11.0.48.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-49"}]}, "of:000002ddebbeb549/33": {"interfaces": [{"ips": ["11.0.32.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-33"}]}, "of:000002ddebbeb549/32": {"interfaces": [{"ips": ["11.0.31.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-32"}]}, "of:000002ddebbeb549/31": {"interfaces": [{"ips": ["11.0.30.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-31"}]}, "of:000002ddebbeb549/30": {"interfaces": [{"ips": ["11.0.29.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-30"}]}, "of:000002ddebbeb549/37": {"interfaces": [{"ips": ["11.0.36.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-37"}]}, "of:000002ddebbeb549/36": {"interfaces": [{"ips": ["11.0.35.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-36"}]}, "of:000002ddebbeb549/35": {"interfaces": [{"ips": ["11.0.34.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-35"}]}, "of:000002ddebbeb549/34": {"interfaces": [{"ips": ["11.0.33.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-34"}]}, "of:000002ddebbeb549/39": {"interfaces": [{"ips": ["11.0.38.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-39"}]}, "of:000002ddebbeb549/38": {"interfaces": [{"ips": ["11.0.37.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-38"}]}, "of:000002ddebbeb549/55": {"interfaces": [{"ips": ["11.0.54.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-55"}]}, "of:000002ddebbeb549/54": {"interfaces": [{"ips": ["11.0.53.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-54"}]}, "of:000002ddebbeb549/57": {"interfaces": [{"ips": ["11.0.56.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-57"}]}, "of:000002ddebbeb549/56": {"interfaces": [{"ips": ["11.0.55.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-56"}]}, "of:000002ddebbeb549/51": {"interfaces": [{"ips": ["11.0.50.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-51"}]}, "of:000002ddebbeb549/50": {"interfaces": [{"ips": ["11.0.49.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-50"}]}, "of:000002ddebbeb549/53": {"interfaces": [{"ips": ["11.0.52.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-53"}]}, 
"of:000002ddebbeb549/52": {"interfaces": [{"ips": ["11.0.51.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-52"}]}, "of:000002ddebbeb549/59": {"interfaces": [{"ips": ["11.0.58.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-59"}]}, "of:000002ddebbeb549/58": {"interfaces": [{"ips": ["11.0.57.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-58"}]}, "of:000002ddebbeb549/60": {"interfaces": [{"ips": ["11.0.59.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-60"}]}, "of:000002ddebbeb549/61": {"interfaces": [{"ips": ["11.0.60.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-61"}]}, "of:000002ddebbeb549/62": {"interfaces": [{"ips": ["11.0.61.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-62"}]}, "of:000002ddebbeb549/63": {"interfaces": [{"ips": ["11.0.62.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-63"}]}, "of:000002ddebbeb549/64": {"interfaces": [{"ips": ["11.0.63.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-64"}]}, "of:000002ddebbeb549/65": {"interfaces": [{"ips": ["11.0.64.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-65"}]}, "of:000002ddebbeb549/66": {"interfaces": [{"ips": ["11.0.65.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-66"}]}, "of:000002ddebbeb549/67": {"interfaces": [{"ips": ["11.0.66.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-67"}]}, "of:000002ddebbeb549/68": {"interfaces": [{"ips": ["11.0.67.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-68"}]}, "of:000002ddebbeb549/69": {"interfaces": [{"ips": ["11.0.68.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-69"}]}, "of:000002ddebbeb549/79": {"interfaces": [{"ips": ["11.0.78.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-79"}]}, "of:000002ddebbeb549/78": {"interfaces": [{"ips": ["11.0.77.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-78"}]}, "of:000002ddebbeb549/77": {"interfaces": [{"ips": ["11.0.76.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-77"}]}, "of:000002ddebbeb549/76": {"interfaces": [{"ips": ["11.0.75.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-76"}]}, "of:000002ddebbeb549/75": {"interfaces": [{"ips": ["11.0.74.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-75"}]}, "of:000002ddebbeb549/74": {"interfaces": [{"ips": ["11.0.73.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-74"}]}, "of:000002ddebbeb549/73": {"interfaces": [{"ips": ["11.0.72.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-73"}]}, "of:000002ddebbeb549/72": {"interfaces": [{"ips": ["11.0.71.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-72"}]}, "of:000002ddebbeb549/71": {"interfaces": [{"ips": ["11.0.70.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-71"}]}, "of:000002ddebbeb549/70": {"interfaces": [{"ips": ["11.0.69.2/24"], "mac": "00:00:00:00:00:01", "name": "b1-70"}]}}, "devices": {"of:000002ddebbeb549": {"basic": {"driver": "softrouter"}}}}
\ No newline at end of file
diff --git a/src/test/setup/prerequisites.sh b/src/test/setup/prerequisites.sh
new file mode 100755
index 0000000..bc3f4b2
--- /dev/null
+++ b/src/test/setup/prerequisites.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+apt-get update
+apt-get -y install apt-transport-https ca-certificates
+apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
+if [ ! -f /etc/apt/sources.list.d/docker.list ]; then
+    echo deb https://apt.dockerproject.org/repo ubuntu-trusty main |  tee /etc/apt/sources.list.d/docker.list
+fi
+apt-get update
+apt-get purge lxc-docker || true
+apt-get -y install linux-image-extra-$(uname -r)
+apt-get -y install apparmor
+echo "Installing Docker"
+apt-get -y install docker-engine
+service docker start
+echo "Verifying Docker installation"
+docker run --rm hello-world || exit 127
+docker rmi hello-world
+echo "Pulling ONOS latest and 1.5"
+docker pull onosproject/onos:latest || exit 127
+docker pull onosproject/onos:1.5 || exit 127
+apt-get -y install wget git python python-dev python-pip python-setuptools python-scapy python-pexpect tcpdump arping
+easy_install nose
+apt-get -y install openvswitch-common openvswitch-switch
+pip install -U scapy
+pip install monotonic
+pip install configObj
+pip install -U docker-py
+pip install -U pyyaml
+pip install -U nsenter
+pip install -U pyroute2
+pip install -U netaddr
+pip install scapy-ssl_tls
+( cd /tmp && git clone https://github.com/jpetazzo/pipework.git && cp -v pipework/pipework /usr/bin && rm -rf pipework )
+## Special mode to pull the cord-tester repo in case the prerequisites were installed by hand instead of via the repo
+if [ "$1" = "-test" ]; then
+    rm -rf cord-tester
+    git clone https://github.cyanoptics.com/cgaonker/cord-tester.git
+fi
diff --git a/src/test/setup/quagga-config/bgpd.conf b/src/test/setup/quagga-config/bgpd.conf
new file mode 100644
index 0000000..e543ff4
--- /dev/null
+++ b/src/test/setup/quagga-config/bgpd.conf
@@ -0,0 +1,8 @@
+hostname bgpd
+password zebra
+router bgp 1000
+bgp router-id 10.10.0.1
+neighbor 10.10.0.19 remote-as 1019
+neighbor 10.10.0.19 advertisement-interval 1
+neighbor 10.10.0.19 route-server-client
+neighbor 10.10.0.19 timers 30 90
\ No newline at end of file
diff --git a/src/test/setup/quagga-config/start.sh b/src/test/setup/quagga-config/start.sh
new file mode 100755
index 0000000..42a7ded
--- /dev/null
+++ b/src/test/setup/quagga-config/start.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+ulimit -n 65536
+ip a add 10.10.0.3/16 dev eth1
+#bgpd -u root -f /root/config/bgpd.conf &
+conf_file=${1:-/root/config/testrib.conf}
+base_conf=$(basename $conf_file)
+base_conf=${base_conf%%.conf}
+if [[ $base_conf == bgpd* ]]; then
+    /usr/local/sbin/bgpd -u root -f $conf_file
+else
+    /usr/local/sbin/zebra -u root -f $conf_file
+fi
diff --git a/src/test/setup/quagga-config/stop.sh b/src/test/setup/quagga-config/stop.sh
new file mode 100755
index 0000000..8eab135
--- /dev/null
+++ b/src/test/setup/quagga-config/stop.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+pkill -9 zebra
\ No newline at end of file
diff --git a/src/test/setup/quagga-config/testrib.conf b/src/test/setup/quagga-config/testrib.conf
new file mode 100644
index 0000000..af2f213
--- /dev/null
+++ b/src/test/setup/quagga-config/testrib.conf
@@ -0,0 +1,20 @@
+!
+! Zebra configuration saved from vty
+!   2007/04/01 17:46:48
+!
+password zebra
+log stdout
+service advanced-vty
+!
+debug zebra rib
+debug zebra kernel
+debug zebra fpm
+!
+!interface eth1
+! ip address 10.10.0.3/16
+
+! statics that should be subsumed by connected routes, according to interface
+! state
+line vty
+ exec-timeout 0 0
+!
diff --git a/src/test/setup/radius-config/db/radius.sqlite3 b/src/test/setup/radius-config/db/radius.sqlite3
new file mode 100644
index 0000000..19a96c7
--- /dev/null
+++ b/src/test/setup/radius-config/db/radius.sqlite3
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/README.rst b/src/test/setup/radius-config/freeradius/README.rst
new file mode 100644
index 0000000..590c023
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/README.rst
@@ -0,0 +1,657 @@
+Upgrading to Version 3.0
+========================
+
+.. contents:: Sections
+   :depth: 2
+
+.. important:: 
+   The configuration for 3.0 is *largely* compatible with the 2.x.x
+   configuration.  However, it is NOT possible to simply use the 2.x.x
+   configuration as-is.  Instead, you should re-create it.
+
+Security
+--------
+
+A number of configuration items have moved into the "security"
+subsection of radiusd.conf.  If you use these, you should move them.
+Otherwise, they can be ignored.
+
+The list of moved options is::
+
+  chroot
+  user
+  group
+  allow_core_dumps
+  reject_delay
+  status_server
+
+These entries should be moved from "radiusd.conf" to the "security"
+subsection of that file.
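+
+As a minimal illustrative sketch (the values below are examples only,
+not recommendations), the resulting ``security`` subsection might look
+like::
+
+  security {
+      user = radiusd
+      group = radiusd
+      allow_core_dumps = no
+      reject_delay = 1
+      status_server = yes
+  }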
+
+Naming
+------
+
+Many names used by configuration items were inconsistent in earlier
+versions of the server.  These names have been unified in version 3.0.
+
+If a file is being referenced or created the config item ``filename``
+is used.
+
+If a file is being created, the initial permissions are set by the
+``permissions`` config item.
+
+If a directory hierarchy needs to be created, the permissions are set
+by ``dir_permissions``.
+
+If an external host is referenced in the context of a module the
+``server`` config item is used.
+
+Unless the config item is a well recognised portmanteau
+(as ``filename`` is for example), it must be written as multiple
+distinct words separated by underscores ``_``.
+
+The configuration items ``file``, ``script_file``, ``module``,
+``detail``, ``detailfile``, ``attrsfile``, ``perm``, ``dirperm``,
+``detailperm``, and ``hostname`` are deprecated, as are any false
+portmanteaus and configuration items that used hyphens as word
+delimiters, e.g. ``foo-bar`` has been changed to ``foo_bar``.  Please
+update your module configuration to use the new syntax.
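+
+As a purely hypothetical illustration of the renaming (the module name
+and values are invented for this example), a 2.x.x fragment such as::
+
+  example_module {
+      detailfile = ${radacctdir}/detail
+      detailperm = 0600
+  }
+
+would be written for 3.0 as::
+
+  example_module {
+      filename = ${radacctdir}/detail
+      permissions = 0600
+  }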
+
+In most cases the server will tell you the replacement config item to
+use.  As always, run the server in debugging mode to see these
+messages.
+
+Modules Directory
+-----------------
+
+As of version 3.0, the ``modules/`` directory no longer exists.
+
+Instead, all "example" modules have been put into the
+``mods-available/`` directory.  Modules which can be loaded by the
+server are placed in the ``mods-enabled/`` directory.  All of the
+modules in that directory will be loaded.  This means that the
+``instantiate`` section of radiusd.conf is less important.  The only
+reason to list a module in the ``instantiate`` section is to force
+ordering when the modules are loaded.
+
+Modules can be enabled by creating a soft link.  For module ``foo``, do::
+
+  $ cd raddb
+  $ ln -s mods-available/foo mods-enabled/foo
+
+To create "local" versions of the modules, we suggest copying the file
+instead.  This leaves the original file (with documentation) in the
+``mods-available/`` directory.  Local changes should go into the
+``mods-enabled/`` directory.
+
+Module-specific configuration files are now in the ``mods-config/``
+directory.  This change allows for better organization, and means that
+there are fewer files in the main ``raddb`` directory.  See
+``mods-config/README.rst`` for more details.
+
+Changed Modules
+---------------
+
+The following modules have been changed.
+
+
+rlm_sql
+~~~~~~~
+
+The SQL configuration has been moved from ``sql.conf`` to
+``mods-available/sql``.  The ``sqlippool.conf`` file has also been
+moved to ``mods-available/sqlippool``.
+
+The SQL module configuration has been changed.  The old connection
+pool options are no longer accepted::
+
+  num_sql_socks
+  connect_failure_retry_delay
+  lifetime
+  max_queries
+
+Instead, a connection pool configuration is used.  This configuration
+contains all of the functionality of the previous configuration, but
+in a more generic form.  It also is used in multiple modules, meaning
+that there are fewer different configuration items.  The mapping
+between the configuration items is::
+
+  num_sql_socks			-> pool { max }
+  connect_failure_retry_delay	-> pool { retry_delay }
+  lifetime			-> pool { lifetime }
+  max_queries			-> pool { uses }
+
+The pool configuration adds a number of new configuration options,
+which allow the administrator to better control how FreeRADIUS uses
+SQL connection pools.
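+
+As a rough sketch only (the values are illustrative, and the pool
+section accepts more options than are shown here), the equivalent pool
+configuration looks something like::
+
+  pool {
+      max = 5
+      retry_delay = 30
+      lifetime = 0
+      uses = 0
+  }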
+
+The following parameters have been changed::
+
+  trace				-> removed
+  tracefile			-> logfile
+
+The logfile is intended to log SQL queries performed.  If you need to
+debug the server, use debugging mode.  If ``logfile`` is set, then
+*all* SQL queries will go to ``logfile``.
+
+You can now use a NULL SQL database::
+
+  driver = rlm_sql_null
+
+This is an empty driver which will always return "success".  It is
+intended to be used to replace the ``sql_log`` module, and to work in
+conjunction with the ``radsqlrelay`` program.  Simply take your normal
+configuration for raddb/mods-enabled/sql, and set::
+
+  driver = rlm_sql_null
+  ...
+  logfile = ${radacctdir}/sql.log
+
+All of the SQL queries will be logged to that file.  The connection
+pool does not need to be configured for the ``null`` SQL driver.  It
+can be left as-is, or deleted from the SQL configuration file.
+
+rlm_sql_sybase
+~~~~~~~~~~~~~~
+
+The ``rlm_sql_sybase`` module has been renamed to ``rlm_sql_freetds``
+and the old ``rlm_sql_freetds`` module has been removed.
+
+``rlm_sql_sybase`` used the newer ct-lib API, and ``rlm_sql_freetds``
+used an older API and was incomplete.
+
+The new ``rlm_sql_freetds`` module now also supports database
+selection on connection startup so ``use`` statements no longer
+have to be included in queries.
+
+sql/dialup.conf
+~~~~~~~~~~~~~~~
+
+Queries for post-auth and accounting calls have been re-arranged.  The
+SQL module will now expand the 'reference' configuration item in the
+appropriate sub-section, and resolve this to a configuration
+item. This behaviour is similar to rlm_linelog.  This dynamic
+expansion allows for a flexible mapping between accounting types and
+SQL queries.  Previously, the mapping was fixed.  Any "new" accounting
+type was ignored by the module.  Now, support for any accounting type
+can be added by just adding a new target, as below.
+
+Queries from v2.x.x may be manually copied to the new v3.0
+``dialup.conf`` file (``raddb/sql/main/<dialect>/queries.conf``).
+When doing this you may also need to update references to the
+accounting tables, as their definitions will now be outside of
+the subsection containing the query.
+
+The mapping from old "fixed" query to new "dynamic" query is as follows::
+
+  accounting_onoff_query		-> accounting.type.accounting-on.query
+  accounting_update_query		-> accounting.type.interim-update.query
+  accounting_update_query_alt		+> accounting.type.interim-update.query
+  accounting_start_query		-> accounting.type.start.query
+  accounting_start_query_alt		+> accounting.type.start.query
+  accounting_stop_query			-> accounting.type.stop.query
+  accounting_stop_query_alt		+> accounting.type.stop.query
+  postauth_query			-> post-auth.query
+
+Alternatively a 2.x.x config may be patched to work with the
+3.0 module by adding the following::
+
+  accounting {
+	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
+	type {
+		accounting-on {
+			query = "${....accounting_onoff_query}"
+		}
+		accounting-off {
+			query = "${....accounting_onoff_query}"
+		}
+		start {
+			query = "${....accounting_start_query}"
+			query = "${....accounting_start_query_alt}"
+		}
+		interim-update {
+			query = "${....accounting_update_query}"
+			query = "${....accounting_update_query_alt}"
+		}
+		stop {
+			query = "${....accounting_stop_query}"
+			query = "${....accounting_stop_query_alt}"
+		}
+	}
+  }
+
+  post-auth {
+	query = "${..postauth_query}"
+  }
+
+In general, it is safer to migrate the configuration than to "patch"
+it to look like a v2 configuration.
+
+Note that the sub-sections holding the queries are labelled
+``accounting-on``, and not ``accounting_on``.  The reason is that the
+names of these sections are taken directly from the
+``Accounting-Request`` packet, and the ``Acct-Status-Type`` field.
+The ``sql`` module looks at the value of that field, and then looks
+for a section of that name, in order to find the query to use.
+
+That process means that the server can be extended to support any new
+value of ``Acct-Status-Type``, simply by adding a named sub-section,
+and a query.  This behaviour is preferable to that of v2, which had
+hard-coded queries for certain ``Acct-Status-Type`` values, and
+ignored all other values.
+
+rlm_ldap
+~~~~~~~~
+
+The LDAP module configuration has been substantially changed.  Please
+read ``raddb/mods-available/ldap``.  It now uses a connection pool,
+just like the SQL module.
+
+Many of the configuration items remain the same, but they have been
+moved into subsections.  This change is largely cosmetic, but it makes
+the configuration clearer.  Instead of having a large set of random
+configuration items, they are now organized into logical groups.
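+
+A rough sketch of the new layout (abbreviated and illustrative only;
+``mods-available/ldap`` is the authoritative reference)::
+
+  ldap {
+	server = "ldap.example.com"
+	identity = "cn=admin,dc=example,dc=com"
+	password = secret
+	base_dn = "dc=example,dc=com"
+
+	user {
+		base_dn = "${..base_dn}"
+		filter = "(uid=%{%{Stripped-User-Name}:-%{User-Name}})"
+	}
+
+	group {
+		base_dn = "${..base_dn}"
+	}
+
+	pool {
+		start = 5
+		max = 10
+	}
+  }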
+
+You will need to read your old LDAP configuration, and migrate it
+manually to the new configuration.  Simply copying the old
+configuration WILL NOT WORK.
+
+Users upgrading from 2.x.x who used to call the ldap module in
+``post-auth`` should now set ``edir_autz = yes``, and remove the ``ldap``
+module from the ``post-auth`` section.
+
+rlm_ldap and LDAP-Group
+~~~~~~~~~~~~~~~~~~~~~~~
+
+In 2.x.x the registration of the ``LDAP-Group`` pair comparison was done
+by the last instance of rlm_ldap to be instantiated. In 3.0 this has
+changed so that only the default ``ldap {}`` instance registers
+``LDAP-Group``.
+
+If ``<instance>-LDAP-Group`` is already used throughout your configuration
+no changes will be needed.
+
+rlm_ldap authentication
+~~~~~~~~~~~~~~~~~~~~~~~
+
+In 2.x.x the LDAP module had a ``set_auth_type`` configuration item,
+which forced ``Auth-Type := ldap``. This was removed in 3.x.x as it
+often did not work, and was not consistent with the rest of the
+server.  We generally recommend that LDAP should be used as a
+database, and that FreeRADIUS should do authentication.
+
+The only reason to use ``Auth-Type := ldap`` is when the LDAP server
+will not supply the "known good" password to FreeRADIUS, *and* where
+the Access-Request contains User-Password.  This situation happens
+only for Active Directory.  If you think you need to force ``Auth-Type
+:= ldap`` in other situations, you are very likely to be wrong.
+
+The following is an example of what should be inserted into the
+``authorize {}`` and ``authenticate {}`` sections of the relevant
+virtual-servers, to get functionality equivalent to v2.x::
+
+  authorize {
+    ...
+    ldap
+    if ((ok || updated) && User-Password) {
+      update control {
+	Auth-Type := ldap
+      }
+    }
+    ...
+  }
+  
+  authenticate {
+    ...
+    Auth-Type ldap {
+      ldap   
+    }
+    ...
+  }
+
+rlm_eap
+~~~~~~~
+
+The EAP configuration has been moved from ``eap.conf`` to
+``mods-available/eap``.  A new ``pwd`` subsection has been added for
+EAP-PWD.
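+
+A minimal sketch of the new subsection (illustrative values; the
+shipped ``mods-available/eap`` documents each item)::
+
+  eap {
+	...
+	pwd {
+		group = 19
+		server_id = theserver@example.com
+		fragment_size = 1020
+	}
+  }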
+
+rlm_expiration & rlm_logintime
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The rlm_expiration and rlm_logintime modules no longer add a ``Reply-Message``.
+The same behaviour can be achieved by checking the return code of the module
+and adding the ``Reply-Message`` with unlang::
+
+  expiration
+  if (userlock) {
+    update reply {
+      Reply-Message := "Your account has expired"
+    }
+  }
+
+rlm_unix
+~~~~~~~~
+
+The ``unix`` module does not have an ``authenticate`` section.  So you
+cannot set ``Auth-Type := System``.  The ``unix`` module has also been
+deleted from the examples in ``sites-available/``.  Listing it there
+has been deprecated for many years.
+
+The PAP module can do crypt authentication.  It should be used instead
+of Unix authentication.
+
+The Unix module can still pull the passwords from ``/etc/passwd`` or
+``/etc/shadow``.  This is done by listing it in the ``authorize``
+section, as is done in the examples in ``sites-available/``.  However,
+some systems using NIS or NSS will not supply passwords to the
+``unix`` module.  For those systems, we recommend putting users and
+passwords into a database, instead of relying on ``/etc/passwd``.
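+
+A minimal sketch of this approach, with ``unix`` supplying the password
+in ``authorize`` and PAP performing the authentication (illustrative;
+adapt it to your virtual server)::
+
+  authorize {
+    ...
+    unix
+    pap
+  }
+
+  authenticate {
+    Auth-Type PAP {
+      pap
+    }
+  }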
+
+New Modules
+-----------
+
+rlm_date
+~~~~~~~~
+
+Instances of rlm_date register an xlat method which can translate
+integer and date values to an arbitrarily formatted date/time
+string, or an arbitrarily formatted time string to an integer,
+depending on the attribute type passed.
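+
+For example, with an instance named ``date`` and a ``format`` string,
+the xlat can be used as follows (an illustrative sketch; see
+``mods-available/date``)::
+
+  date {
+	format = "%b %e %Y %H:%M:%S %Z"
+  }
+
+  update reply {
+	Reply-Message := "Logged in at %{date:&Event-Timestamp}"
+  }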
+
+rlm_rest
+~~~~~~~~
+
+The ``rest`` module is used to translate RADIUS requests into
+RESTful HTTP requests. Currently supported body types are JSON
+and POST.
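+
+A rough sketch of a typical configuration (illustrative only; the URI,
+method, and body items are documented in ``mods-available/rest``)::
+
+  rest {
+	connect_uri = "http://127.0.0.1/"
+
+	authorize {
+		uri = "${..connect_uri}user/%{User-Name}"
+		method = 'get'
+	}
+  }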
+
+rlm_unpack
+~~~~~~~~~~
+
+The ``unpack`` module is used to extract data buried inside of binary
+attributes.  e.g. if we have ``Class = 0x00000001020304`` then::
+
+  Tmp-Integer-0 := "%{unpack:&Class 4 short}"
+
+will unpack octets 4 and 5 as a "short", which has value 0x0304.
+All integers are assumed to be in network byte order.
+
+rlm_yubikey
+~~~~~~~~~~~
+
+The ``yubikey`` module can be used to forward yubikey OTP token
+values to a Yubico validation server, or decrypt the token 
+using a PSK.
+
+Deleted Modules
+---------------
+
+The following modules have been deleted, and are no longer supported
+in Version 3.  If you are using one of these modules, your
+configuration can probably be changed to not need it.  Otherwise email
+the freeradius-devel list, and ask about the module.
+
+rlm_acct_unique
+~~~~~~~~~~~~~~~
+
+This module has been replaced by the "acct_unique" policy.  See
+raddb/policy.d/accounting.
+
+The method for calculating the value of acct_unique has changed.
+However, as this method was configurable, this change should not
+matter.  The only issue is in having a v2 and v3 server writing to the
+same database at the same time.  They will calculate different values
+for Acct-Unique-Id.
+
+rlm_acctlog
+~~~~~~~~~~~
+
+You should use rlm_linelog instead.  That module has a superset of the
+acctlog functionality.
+
+rlm_attr_rewrite
+~~~~~~~~~~~~~~~~
+
+The attr_rewrite module looked for an attribute, and then re-wrote it,
+or created a new attribute.  All of that can be done in "unlang".
+
+A sample configuration in "unlang" is::
+
+  if (request:Calling-Station-Id) {
+    update request {
+      Calling-Station-Id := "...."
+    }
+  }
+
+We suggest updating all uses of attr_rewrite to use unlang instead.
+
+rlm_checkval
+~~~~~~~~~~~~
+
+The checkval module compared two attributes.  All of that can be done in "unlang"::
+
+  if (&request:Calling-Station-Id == &control:Calling-Station-Id) {
+    ok
+  }
+
+We suggest updating all uses of checkval to use unlang instead.
+
+rlm_dbm
+~~~~~~~
+
+No one seems to use it.  There is no sample configuration for it.
+There is no speed advantage to using it over the "files" module.
+Modern systems are fast enough that 10K entries can be read from the
+"users" file in about 10ms.  If you need more users than that, use a
+real database such as SQL.
+
+rlm_fastusers
+~~~~~~~~~~~~~
+
+No one seems to use it.  It has been deprecated since Version 2.0.0.
+The "files" module was rewritten so that the "fastusers" module was no
+longer necessary.
+
+rlm_policy
+~~~~~~~~~~
+
+No one seems to use it.  Almost all of its functionality is available
+via "unlang".
+
+rlm_sim_files
+~~~~~~~~~~~~~
+
+The rlm_sim_files module has been deleted.  It was never marked "stable",
+and was never used in a production environment.  There are better ways
+to test EAP.
+
+If you want similar functionality, see rlm_passwd.  It can read CSV
+files, and create attributes from them.
+
+rlm_sql_log
+~~~~~~~~~~~
+
+This has been replaced with the "null" sql driver.  See
+raddb/mods-available/sql for an example configuration.
+
+The main SQL module has more functionality than rlm_sql_log, and
+results in less code in the server.
+
+Other Functionality
+-------------------
+
+The following is a list of new / changed functionality.
+
+RadSec
+~~~~~~
+
+RadSec (or RADIUS over TLS) is now supported.  RADIUS over bare TCP
+is also supported, but is recommended only for secure networks.
+
+See ``sites-available/tls`` for complete details on using TLS.  The server
+can both receive incoming TLS connections, and also originate outgoing
+TLS connections.
+
+The TLS configuration is taken from the old EAP-TLS configuration.  It
+is largely identical to the old EAP-TLS configuration, so it should be
+simple to use and configure.  It re-uses much of the EAP-TLS code,
+so it is well-tested and reliable.
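+
+As an illustration, a RadSec listener in ``sites-available/tls`` looks
+roughly like the following (abbreviated; the shipped file documents the
+full set of options, including the ``tls`` sub-section)::
+
+  listen {
+	type = auth
+	ipaddr = *
+	port = 2083
+	proto = tcp
+
+	tls {
+		private_key_file = ${certdir}/server.pem
+		certificate_file = ${certdir}/server.pem
+		ca_file = ${cadir}/ca.pem
+	}
+  }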
+
+Once RadSec is enabled, normal debugging mode will not work.  This is
+because the TLS code requires threading to work properly.  Instead of doing::
+
+  $ radiusd -X
+
+you will need to do::
+
+  $ radiusd -fxx -l stdout
+
+That's the price to pay for using RadSec.  This limitation may be
+lifted in a future version of the server.
+
+
+PAP and User-Password
+~~~~~~~~~~~~~~~~~~~~~
+
+From version 3.0 onwards the server no longer supports authenticating
+against a cleartext password in the 'User-Password' attribute. Any
+occurrences of this (for instance, in the users file) should now be changed
+to 'Cleartext-Password' instead.
+
+e.g. change entries like this::
+
+  bob User-Password == "hello"
+
+to ones like this::
+
+  bob Cleartext-Password := "hello"
+
+
+If this is not done, authentication will likely fail.  The server will
+also print a helpful message in debugging mode.
+
+If it really is impossible to do this, the following unlang inserted above
+the call to the pap module may be used to copy User-Password to the correct
+attribute::
+
+  if (!control:Cleartext-Password && control:User-Password) {
+    update control {
+      Cleartext-Password := "%{control:User-Password}"
+    }
+  }
+
+However, this should only be seen as a temporary, not permanent, fix.
+It is better to fix your databases to use the correct configuration.
+
+Unlang
+~~~~~~
+
+The unlang policy language is compatible with v2, but has a number of
+new features.  See ``man unlang`` for complete documentation.
+
+ERRORS
+
+Many more errors are caught when the server is starting up.  Syntax
+errors in ``unlang`` are caught, and a helpful error message is
+printed.  The error message points to the exact place where the error
+occurred::
+
+  ./raddb/sites-enabled/default[230]: Parse error in condition
+  ERROR:  if (User-Name ! "bob") {
+  ERROR:                ^ Invalid operator
+
+``update`` sections are more generic.  Instead of doing ``update
+reply``, you can do the following::
+
+  update {
+	  reply:Class := 0x0000
+	  control:Cleartext-Password := "hello"
+  }
+
+This change means that you need fewer ``update`` sections.
+
+COMPARISONS
+
+Attribute comparisons can be done via the ``&`` operator.  When you
+needed to compare two attributes, the old comparison style was::
+
+  if (User-Name == "%{control:Tmp-String-0}") {
+
+This syntax is inefficient, as the ``Tmp-String-0`` attribute would be
+printed to an intermediate string, causing unnecessary work.  You can
+now instead compare the two attributes directly::
+
+  if (&User-Name == &control:Tmp-String-0) {
+
+See ``man unlang`` for more details.
+
+CASTS
+
+Casts are now permitted.  This allows you to force type-specific
+comparisons::
+
+  if (<ipaddr>"%{sql: SELECT...}" == 127.0.0.1) {
+
+This forces the string returned by the SELECT to be treated as an IP
+address, and compare to ``127.0.0.1``.  Previously, the comparison
+would have been done as a simple string comparison.
+
+NETWORKS
+
+IP networks are now supported::
+
+  if (127.0.0.1/32 == 127.0.0.1) {
+
+Will be ``true``.  The various comparison operators can be used to
+check IP network membership::
+
+  if (127/8 > 127.0.0.1) {
+
+Returns ``true``, because ``127.0.0.1`` is within the ``127/8``
+network.  However, the following comparison will return ``false``::
+
+  if (127/8 > 192.168.0.1) {
+
+because ``192.168.0.1`` is outside of the ``127/8`` network.
+
+OPTIMIZATION
+
+As ``unlang`` is now pre-compiled, many compile-time optimizations are
+done.  This means that the debug output may not be exactly the same as
+what is in the configuration files::
+
+  if (0 && (User-Name == "bob")) {
+
+The result will always be ``false``, as the ``if 0`` prevents the
+following ``&& ...`` from being evaluated.
+
+Not only that, but the entire contents of that section will be ignored
+entirely::
+
+  if (0) {
+      this_module_does_not_exist
+      and_this_one_does_not_exist_either
+  }
+
+In v2, that configuration would result in a parse error, as there is
+no module called ``this_module_does_not_exist``.  In v3, that text is
+ignored.  This ability allows you to have dynamic configurations where
+certain parts are used (or not) depending on compile-time configuration.
+
+Similarly, conditions which always evaluate to ``true`` will be
+optimized away::
+
+  if (1) {
+      files
+  }
+
+That configuration will never show the ``if (1)`` output in debugging mode.
+
+
+Dialup_admin
+------------
+
+The dialup_admin directory has been removed.  No one stepped forward
+to maintain it, and the code had not been changed in many years.
+
diff --git a/src/test/setup/radius-config/freeradius/certs/01.pem b/src/test/setup/radius-config/freeradius/certs/01.pem
new file mode 100644
index 0000000..246df1b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/01.pem
@@ -0,0 +1,80 @@
+Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number: 1 (0x1)
+    Signature Algorithm: sha1WithRSAEncryption
+        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
+        Validity
+            Not Before: Mar 11 18:53:35 2016 GMT
+            Not After : Mar  6 18:53:35 2017 GMT
+        Subject: C=US, ST=CA, O=Ciena Inc., CN=Example Server Certificate/emailAddress=admin@ciena.com
+        Subject Public Key Info:
+            Public Key Algorithm: rsaEncryption
+                Public-Key: (2048 bit)
+                Modulus:
+                    00:a7:9b:3d:b2:8f:6d:dd:55:c7:34:5a:8b:c7:78:
+                    a8:ff:14:fa:21:0e:60:1b:0c:87:36:f1:07:3a:cc:
+                    f1:8a:9d:23:4c:31:8d:81:92:0b:1f:b2:f9:6f:55:
+                    79:c3:fd:18:8f:99:a7:8b:8c:41:18:a6:02:08:cb:
+                    b5:5b:8b:b7:23:a3:6d:20:a9:ec:ee:bf:fa:f1:99:
+                    d7:07:35:a1:3b:e9:5e:b7:84:8a:db:5d:46:15:3e:
+                    1f:92:2d:12:db:4c:c3:aa:13:c7:dd:2d:a0:0a:d2:
+                    3c:59:19:fa:7c:d9:a5:b4:16:bd:82:ba:35:47:c4:
+                    dc:fb:af:61:f1:70:d8:b3:2c:ef:91:20:c5:d5:af:
+                    b7:ac:5d:15:4e:ea:64:ab:0b:b3:ee:25:7e:aa:a8:
+                    a0:a5:36:2e:59:ed:b8:c7:02:4f:ab:9b:e7:50:4c:
+                    30:14:4d:48:1a:a2:88:05:6e:7e:82:ef:f8:c5:70:
+                    b5:d8:3e:ae:f6:e0:2e:68:ba:52:d3:e5:3a:2d:0f:
+                    dd:43:86:39:b5:af:5b:c3:86:7b:98:78:7f:d5:9b:
+                    ee:9f:e4:50:5e:03:9e:29:67:f5:78:35:b1:d3:e2:
+                    66:2d:68:36:c2:30:c9:06:c2:1c:73:9b:c3:09:28:
+                    ba:08:b8:f5:49:e0:5b:d1:43:d9:38:06:47:32:a2:
+                    de:a9
+                Exponent: 65537 (0x10001)
+        X509v3 extensions:
+            X509v3 Extended Key Usage: 
+                TLS Web Server Authentication
+            X509v3 CRL Distribution Points: 
+
+                Full Name:
+                  URI:http://www.example.com/example_ca.crl
+
+    Signature Algorithm: sha1WithRSAEncryption
+         58:cd:50:ef:5f:b1:3e:34:a2:5d:f4:59:f6:11:25:be:de:b2:
+         f8:58:81:2a:89:f0:e7:df:36:88:49:8e:d2:c4:44:22:6c:40:
+         a4:13:6c:8c:15:9d:f2:9e:32:29:ff:31:f0:82:92:3e:93:58:
+         ce:eb:da:9b:19:76:7d:de:54:c5:b1:b2:2e:66:4c:7b:7c:9c:
+         98:12:f9:20:aa:d3:c7:d3:0b:70:5a:c3:24:d7:b9:92:8e:38:
+         fe:54:21:c6:c9:e4:c8:b8:b6:ae:8a:0e:2d:18:95:53:da:b6:
+         9b:94:12:0d:68:e8:ef:0a:78:8b:29:cc:0f:59:a4:d8:dc:6c:
+         34:b2:7a:6f:de:63:1e:e1:03:d2:f3:ca:b6:26:05:f4:22:51:
+         2a:ff:78:d1:07:b3:e7:7e:ab:68:33:2a:0a:d0:cc:be:26:ea:
+         6a:6e:10:d7:2b:7a:7b:cb:e6:0d:50:66:7f:9a:33:31:ad:8d:
+         1b:3b:3f:8e:74:29:3d:07:37:9d:4d:29:ad:b6:cc:84:d8:1c:
+         09:48:61:ce:67:30:ee:74:25:fe:23:5a:8d:00:f6:1a:5d:de:
+         04:70:a7:ea:e9:6f:b0:25:10:f4:3a:70:ab:5a:57:5f:53:12:
+         d8:0e:52:f4:f2:f5:dc:25:71:e5:46:24:09:65:95:22:1b:35:
+         8b:78:9b:6f
+-----BEGIN CERTIFICATE-----
+MIIDyjCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM1WhcN
+MTcwMzA2MTg1MzM1WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
+YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCnmz2yj23dVcc0WovHeKj/FPohDmAbDIc28Qc6
+zPGKnSNMMY2BkgsfsvlvVXnD/RiPmaeLjEEYpgIIy7Vbi7cjo20gqezuv/rxmdcH
+NaE76V63hIrbXUYVPh+SLRLbTMOqE8fdLaAK0jxZGfp82aW0Fr2CujVHxNz7r2Hx
+cNizLO+RIMXVr7esXRVO6mSrC7PuJX6qqKClNi5Z7bjHAk+rm+dQTDAUTUgaoogF
+bn6C7/jFcLXYPq724C5oulLT5TotD91Dhjm1r1vDhnuYeH/Vm+6f5FBeA54pZ/V4
+NbHT4mYtaDbCMMkGwhxzm8MJKLoIuPVJ4FvRQ9k4Bkcyot6pAgMBAAGjTzBNMBMG
+A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
+ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAFjN
+UO9fsT40ol30WfYRJb7esvhYgSqJ8OffNohJjtLERCJsQKQTbIwVnfKeMin/MfCC
+kj6TWM7r2psZdn3eVMWxsi5mTHt8nJgS+SCq08fTC3BawyTXuZKOOP5UIcbJ5Mi4
+tq6KDi0YlVPatpuUEg1o6O8KeIspzA9ZpNjcbDSyem/eYx7hA9LzyrYmBfQiUSr/
+eNEHs+d+q2gzKgrQzL4m6mpuENcrenvL5g1QZn+aMzGtjRs7P450KT0HN51NKa22
+zITYHAlIYc5nMO50Jf4jWo0A9hpd3gRwp+rpb7AlEPQ6cKtaV19TEtgOUvTy9dwl
+ceVGJAlllSIbNYt4m28=
+-----END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs/02.pem b/src/test/setup/radius-config/freeradius/certs/02.pem
new file mode 100644
index 0000000..1197fec
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/02.pem
@@ -0,0 +1,80 @@
+Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number: 2 (0x2)
+    Signature Algorithm: sha1WithRSAEncryption
+        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
+        Validity
+            Not Before: Mar 11 18:53:36 2016 GMT
+            Not After : Mar  6 18:53:36 2017 GMT
+        Subject: C=US, ST=CA, O=Ciena Inc., CN=user@ciena.com/emailAddress=user@ciena.com
+        Subject Public Key Info:
+            Public Key Algorithm: rsaEncryption
+                Public-Key: (2048 bit)
+                Modulus:
+                    00:ec:5e:99:c0:6c:3e:7f:6d:66:c0:9a:e6:8d:89:
+                    03:ab:10:0c:2e:c0:e8:98:02:35:75:e5:d9:51:73:
+                    28:b3:4d:bc:1f:4f:4b:a0:fb:64:1b:10:e4:09:f4:
+                    cc:40:cc:37:38:b9:d6:ae:e5:9e:b6:20:d5:7c:a2:
+                    13:84:bc:17:33:06:00:5a:fd:e1:19:46:31:02:54:
+                    6d:10:fd:57:fa:2a:b3:33:17:e2:4f:be:88:03:8a:
+                    b4:80:35:82:bd:f1:ed:98:be:d2:d9:23:55:25:73:
+                    5d:39:a0:36:78:42:84:06:a6:74:cb:23:61:41:b9:
+                    f8:26:3d:58:08:9c:5f:2c:be:54:45:77:cd:3a:61:
+                    65:90:d1:3a:37:23:12:9e:26:fd:34:97:54:f6:0f:
+                    81:80:d7:23:8d:18:64:a5:f9:05:db:ea:ca:45:ad:
+                    4f:fb:48:81:96:f8:f1:14:b5:34:fc:8d:fd:79:02:
+                    63:39:77:6e:fd:b5:ab:1e:cc:73:47:dc:11:bb:09:
+                    04:82:11:61:35:24:7f:19:ec:8a:57:27:98:bc:52:
+                    60:ef:a9:f8:36:7d:b8:12:c1:cd:c4:2d:fb:84:5d:
+                    e6:92:d2:7e:2d:b5:58:cd:fd:d9:9d:a8:3a:2d:ef:
+                    b3:f3:98:00:f0:2a:82:68:b6:25:63:af:de:67:8f:
+                    6b:ff
+                Exponent: 65537 (0x10001)
+        X509v3 extensions:
+            X509v3 Extended Key Usage: 
+                TLS Web Client Authentication
+            X509v3 CRL Distribution Points: 
+
+                Full Name:
+                  URI:http://www.example.com/example_ca.crl
+
+    Signature Algorithm: sha1WithRSAEncryption
+         2c:1c:cc:3c:34:c8:07:ab:0b:c8:f9:74:4f:a2:55:32:33:a4:
+         ca:57:40:56:15:d6:89:0b:13:8d:a1:90:18:95:14:7b:57:26:
+         50:9c:99:6d:46:44:73:5d:cc:ca:05:cc:3c:e3:38:c7:bc:fa:
+         56:95:c6:ba:ad:5c:fd:5c:67:57:34:36:7c:d9:db:25:c2:00:
+         1e:2f:fb:1b:c5:b9:fd:24:1f:3d:eb:08:54:3b:07:4c:47:38:
+         66:ca:4f:8b:98:e5:4b:f3:15:5f:71:ce:0b:e0:43:6c:e8:dd:
+         6f:0a:8f:45:7d:09:12:bf:ae:3b:28:63:3b:e9:51:c4:6f:22:
+         94:c4:40:0a:80:54:6f:0d:5e:0e:e5:43:a0:40:60:12:b4:94:
+         0b:8e:29:ab:98:a8:0f:0d:b1:7a:57:3e:63:a8:50:76:6b:58:
+         c1:f6:34:0d:bb:f0:c4:7b:40:e3:de:5f:ac:bc:8f:71:ab:2d:
+         0e:24:ff:ce:b7:bb:34:be:75:33:25:03:3e:38:d8:8e:08:4d:
+         40:4c:2f:bb:ae:88:29:b4:37:4f:5b:49:06:b8:08:ef:f5:88:
+         f9:da:a1:28:11:68:94:a1:8a:4c:35:88:1e:c6:57:42:f6:75:
+         b2:71:ae:fc:54:58:ce:0d:65:f9:1f:e3:4f:c7:11:07:d0:43:
+         c2:15:2d:ca
+-----BEGIN CERTIFICATE-----
+MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
+MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
+5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
+tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
+OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
+qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
+2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
+eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
+MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
+VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
+RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
+dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
+T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
+yg==
+-----END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs/07a45775.0 b/src/test/setup/radius-config/freeradius/certs/07a45775.0
new file mode 120000
index 0000000..799a1c6
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/07a45775.0
@@ -0,0 +1 @@
+client.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/19a996e2.0 b/src/test/setup/radius-config/freeradius/certs/19a996e2.0
new file mode 120000
index 0000000..799a1c6
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/19a996e2.0
@@ -0,0 +1 @@
+client.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/34e00910.0 b/src/test/setup/radius-config/freeradius/certs/34e00910.0
new file mode 120000
index 0000000..55f0c91
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/34e00910.0
@@ -0,0 +1 @@
+01.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/865470fd.0 b/src/test/setup/radius-config/freeradius/certs/865470fd.0
new file mode 120000
index 0000000..e375f5a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/865470fd.0
@@ -0,0 +1 @@
+ca.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/8fe581ba.0 b/src/test/setup/radius-config/freeradius/certs/8fe581ba.0
new file mode 120000
index 0000000..55f0c91
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/8fe581ba.0
@@ -0,0 +1 @@
+01.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/Makefile b/src/test/setup/radius-config/freeradius/certs/Makefile
new file mode 100644
index 0000000..c8f0892
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/Makefile
@@ -0,0 +1,140 @@
+######################################################################
+#
+#	Make file to be installed in /etc/raddb/certs to enable
+#	the easy creation of certificates.
+#
+#	See the README file in this directory for more information.
+#
+#	$Id: 0613df99502989a6d5751eb8b2088000c58cae98 $
+#
+######################################################################
+
+DH_KEY_SIZE	= 1024
+
+#
+#  Set the passwords
+#
+PASSWORD_SERVER	= `grep output_password server.cnf | sed 's/.*=//;s/^ *//'`
+PASSWORD_CA	= `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'`
+PASSWORD_CLIENT	= `grep output_password client.cnf | sed 's/.*=//;s/^ *//'`
+
+USER_NAME	= `grep emailAddress client.cnf | grep '@' | sed 's/.*=//;s/^ *//'`
+CA_DEFAULT_DAYS = `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'`
+
+######################################################################
+#
+#  Make the necessary files, but not client certificates.
+#
+######################################################################
+.PHONY: all
+all: index.txt serial dh random server ca client
+
+.PHONY: client
+client: client.pem
+
+.PHONY: ca
+ca: ca.der
+
+.PHONY: server
+server: server.pem server.vrfy
+
+######################################################################
+#
+#  Diffie-Hellman parameters
+#
+######################################################################
+dh:
+	openssl dhparam -out dh $(DH_KEY_SIZE)
+
+######################################################################
+#
+#  Create a new self-signed CA certificate
+#
+######################################################################
+ca.key ca.pem: ca.cnf
+	@[ -f index.txt ] || $(MAKE) index.txt
+	@[ -f serial ] || $(MAKE) serial
+	openssl req -new -x509 -keyout ca.key -out ca.pem \
+		-days $(CA_DEFAULT_DAYS) -config ./ca.cnf
+
+ca.der: ca.pem
+	openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der
+
+######################################################################
+#
+#  Create a new server certificate, signed by the above CA.
+#
+######################################################################
+server.csr server.key: server.cnf
+	openssl req -new  -out server.csr -keyout server.key -config ./server.cnf
+
+server.crt: server.csr ca.key ca.pem
+	openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key $(PASSWORD_CA) -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf
+
+server.p12: server.crt
+	openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
+
+server.pem: server.p12
+	openssl pkcs12 -in server.p12 -out server.pem -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
+
+.PHONY: server.vrfy
+server.vrfy: ca.pem
+	@openssl verify -CAfile ca.pem server.pem
+
+######################################################################
+#
+#  Create a new client certificate, signed by the above server
+#  certificate.
+#
+######################################################################
+client.csr client.key: client.cnf
+	openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
+
+client.crt: client.csr ca.pem ca.key
+	openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key $(PASSWORD_CA) -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
+
+client.p12: client.crt
+	openssl pkcs12 -export -in client.crt -inkey client.key -out client.p12  -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
+
+client.pem: client.p12
+	openssl pkcs12 -in client.p12 -out client.pem -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
+	cp client.pem $(USER_NAME).pem
+
+.PHONY: client.vrfy
+client.vrfy: ca.pem client.pem
+	c_rehash .
+	openssl verify -CApath . client.pem
+
+######################################################################
+#
+#  Miscellaneous rules.
+#
+######################################################################
+index.txt:
+	@touch index.txt
+
+serial:
+	@echo '01' > serial
+
+random:
+	@if [ -c /dev/urandom ] ; then \
+		ln -sf /dev/urandom random; \
+	else \
+		date > ./random; \
+	fi
+
+print:
+	openssl x509 -text -in server.crt
+
+printca:
+	openssl x509 -text -in ca.pem
+
+clean:
+	@rm -f *~ *old client.csr client.key client.crt client.p12 client.pem
+
+#
+#	Make a target that people won't run too often.
+#
+destroycerts:
+	rm -f *~ dh *.csr *.crt *.p12 *.der *.pem *.key index.txt* \
+			serial* random *\.0 *\.1
diff --git a/src/test/setup/radius-config/freeradius/certs/README b/src/test/setup/radius-config/freeradius/certs/README
new file mode 100644
index 0000000..f7e0591
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/README
@@ -0,0 +1,226 @@
+  This directory contains scripts to create the server certificates.
+To make a set of default (i.e. test) certificates, simply type:
+
+$ ./bootstrap
+
+  The "openssl" command will be run against the sample configuration
+files included here, and will make a self-signed certificate authority
+(i.e. root CA), and a server certificate.  This "root CA" should be
+installed on any client machine needing to do EAP-TLS, PEAP, or
+EAP-TTLS.
+
+  The Microsoft "XP Extensions" will be automatically included in the
+server certificate.  Without those extensions Windows clients will
+refuse to authenticate to FreeRADIUS.
+
+  The root CA and the "XP Extensions" file also contain a crlDistributionPoints
+attribute. The latest release of Windows Phone needs this to be present
+for the handset to validate the RADIUS server certificate. The RADIUS
+server must have the URI defined, but the CA need not have one; however,
+it is best practice for a CA to have a revocation URI. Note that whilst
+the Windows Mobile client cannot actually use the CRL when doing 802.1X,
+it is recommended that the URI be an actual working URL and contain a
+revocation format file, as there may be other OS behaviour at play and
+future OSes may do something with that URI.
+
+  In general, you should use self-signed certificates for 802.1x (EAP)
+authentication.  When you list root CAs from other organisations in
+the "ca_file", you permit them to masquerade as you, to authenticate
+your users, and to issue client certificates for EAP-TLS.
+
+  If FreeRADIUS was configured to use OpenSSL, then simply starting
+the server as root in debugging mode should also create test
+certificates, i.e.:
+
+$ radiusd -X
+
+  That will cause the EAP-TLS module to run the "bootstrap" script in
+this directory.  The script will be executed only once, the first time
+the server has been installed on a particular machine.  This bootstrap
+script SHOULD be run on installation of any pre-built binary package
+for your OS.  In any case, the script will ensure that it is not run
+twice, and that it does not over-write any existing certificates.
+
+  If you already have CA and server certificates, rename (or delete)
+this directory, and create a new "certs" directory containing your
+certificates.  Note that the "make install" command will NOT
+over-write your existing "raddb/certs" directory, which means that the
+"bootstrap" command will not be run.
+
+
+		NEW INSTALLATIONS OF FREERADIUS
+
+
+  We suggest that new installations use the test certificates for
+initial tests, and then create real certificates to use for normal
+user authentication.  See the instructions below for how to create the
+various certificates.  The old test certificates can be deleted by
+running the following command:
+
+$ rm -f *.pem *.der *.csr *.crt *.key *.p12 serial* index.txt*
+
+  Then, follow the instructions below for creating real certificates.
+
+  Once the final certificates have been created, you can delete the
+"bootstrap" command from this directory, and delete the
+"make_cert_command" configuration from the "tls" sub-section of
+eap.conf.
+
+  If you do not want to enable EAP-TLS, PEAP, or EAP-TTLS, then delete
+the relevant sub-sections from the "eap.conf" file.
+
+
+		MAKING A ROOT CERTIFICATE
+
+
+$ vi ca.cnf
+
+  Edit the "input_password" and "output_password" fields to be the
+  password for the CA certificate.
+
+  Edit the [certificate_authority] section to have the correct values
+  for your country, state, etc.
+
+$ make ca.pem
+
+  This step creates the CA certificate.
+
+$ make ca.der
+
+  This step creates the DER format of the self-signed certificate,
+  which can be imported into Windows.
+
+
+		MAKING A SERVER CERTIFICATE
+
+
+$ vi server.cnf
+
+  Edit the "input_password" and "output_password" fields to be the
+  password for the server certificate.
+
+  Edit the [server] section to have the correct values for your
+  country, state, etc.  Be sure that the commonName field here is
+  different from the commonName for the CA certificate.
+
+$ make server.pem
+
+  This step creates the server certificate.
+
+  If you have an existing certificate authority, and wish to create a
+  certificate signing request for the server certificate, edit
+  server.cnf as above, and type the following command.
+
+$ make server.csr
+
+  You will have to ensure that the certificate contains the XP
+  extensions needed by Microsoft clients.
+
+
+		MAKING A CLIENT CERTIFICATE
+
+
+  Client certificates are used by EAP-TLS, and optionally by EAP-TTLS
+and PEAP.  The following steps outline how to create a client
+certificate that is signed by the server certificate created above.
+You will have to have the password for the server certificate in the
+"input_password" and "output_password" fields of the server.cnf file.
+
+
+$ vi client.cnf
+
+  Edit the "input_password" and "output_password" fields to be the
+  password for the client certificate.  You will have to give these
+  passwords to the end user who will be using the certificates.
+
+  Edit the [client] section to have the correct values for your
+  country, state, etc.  Be sure that the commonName field here is
+  the User-Name that will be used for logins!
+
+$ make client.pem
+
+  The user's certificate will be in "emailAddress.pem",
+  i.e. "user@example.com.pem".
+
+  To create another client certificate, just repeat the steps for
+  making a client certificate, being sure to enter a different login
+  name for "commonName", and a different password.
+
+
+		PERFORMANCE
+
+
+  EAP performance for EAP-TLS, TTLS, and PEAP is dominated by SSL
+  calculations.  That is, a normal system can handle PAP
+  authentication at a rate of 10k packets/s.  However, SSL involves
+  RSA calculations, which are very expensive.  To benchmark your system,
+  do:
+
+$ openssl speed rsa
+
+  or
+
+$ openssl speed rsa2048
+
+  to test 2048 bit keys.
+
+  A 1GHz system will likely do 30 calculations/s.  A 2GHz system may
+  do 50 calculations/s, or more.  That number is also the number of
+  authentications/s that can be done for EAP-TLS (or TTLS, or PEAP).
+
+
+		COMPATIBILITY
+
+The certificates created using this method are known to be compatible
+with ALL operating systems.  Some common issues are:
+
+  - Windows requires certain OIDs in the certificates.  If it doesn't
+    see them, it will stop doing EAP.  The most visible effect is
+    that the client starts EAP, gets a few Access-Challenge packets,
+    and then a little while later re-starts EAP.  If this happens, see
+    the FAQ, and the comments in raddb/eap.conf for how to fix it.
+
+  - Windows requires the root certificates to be on the client PC.
+    If it doesn't have them, you will see the same issue as above.
+
+  - Windows XP post SP2 has a bug where it has problems with
+    certificate chains.  i.e. if the server certificate is an
+    intermediate one, and not a root one, then authentication will
+    silently fail, as above.
+
+  - Some versions of Windows CE cannot handle 4K RSA certificates.
+    They will (again) silently fail, as above.
+
+  - In none of these cases will Windows give the end user any
+    reasonable error message describing what went wrong.  This leads
+    people to blame the RADIUS server.  That blame is misplaced.
+
+  - Certificate chains of more than 64K bytes are known to not work.
+    This is a problem in FreeRADIUS.  However, most clients cannot
+    handle 64K certificate chains.  Most Access Points will shut down
+    the EAP session after about 50 round trips, while 64K certificate
+    chains will take about 60 round trips.  So don't use large
+    certificate chains.  They will only work after everyone upgrades
+    everything in the network.
+
+  - All other operating systems are known to work with EAP and
+    FreeRADIUS.  This includes Linux, *BSD, Mac OS X, Solaris,
+    Symbian, along with all known embedded systems, phones, WiFi
+    devices, etc.
+
+  - Someone needs to ask Microsoft to please stop making life hard for
+    their customers.
+
+
+		SECURITY CONSIDERATIONS
+
+The default certificate configuration files use MD5 for message
+digests, to maintain compatibility with network equipment that
+supports only this algorithm.
+
+MD5 has known weaknesses and is discouraged in favour of SHA1 (see
+http://www.kb.cert.org/vuls/id/836068 for details). If your network
+equipment supports the SHA1 signature algorithm, we recommend that you
+change the "ca.cnf", "server.cnf", and "client.cnf" files to specify
+the use of SHA1 for the certificates. To do this, change the
+'default_md' entry in those files from 'md5' to 'sha1'.
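+
+  For example, after that change the relevant line in each of those
+files would read:
+
+default_md = sha1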
diff --git a/src/test/setup/radius-config/freeradius/certs/bootstrap b/src/test/setup/radius-config/freeradius/certs/bootstrap
new file mode 100755
index 0000000..82f93ec
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/bootstrap
@@ -0,0 +1,82 @@
+#!/bin/sh
+#
+#  This is a wrapper script to create default certificates when the
+#  server first starts in debugging mode.  Once the certificates have been
+#  created, this file should be deleted.
+#
+#  Ideally, this program should be run as part of the installation of any
+#  binary package.  The installation should also ensure that the permissions
+#  and owners are correct for the files generated by this script.
+#
+#  $Id: c9d939beac8d5bdc21ea1ff9233442f9ab933297 $
+#
+umask 027
+cd `dirname $0`
+
+make -h > /dev/null 2>&1
+
+#
+#  If we have a working "make", then use it.  Otherwise, run the commands
+#  manually.
+#
+if [ "$?" = "0" ]; then
+  make all
+  exit $?
+fi
+
+#
+#  The following commands were created by running "make -n", and edited
+#  to remove the trailing backslash, and to add "exit 1" after the commands.
+#
+#  Don't edit the following text.  Instead, edit the Makefile, and
+#  re-generate these commands.
+#
+if [ ! -f dh ]; then
+  openssl dhparam -out dh 1024 || exit 1
+  if [ -e /dev/urandom ] ; then
+	ln -sf /dev/urandom random
+  else
+	date > ./random;
+  fi
+fi
+
+if [ ! -f server.key ]; then
+  openssl req -new  -out server.csr -keyout server.key -config ./server.cnf || exit 1
+fi
+
+if [ ! -f ca.key ]; then
+  openssl req -new -x509 -keyout ca.key -out ca.pem -days `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'` -config ./ca.cnf || exit 1
+fi
+
+if [ ! -f index.txt ]; then
+  touch index.txt
+fi
+
+if [ ! -f serial ]; then
+  echo '01' > serial
+fi
+
+if [ ! -f server.crt ]; then
+  openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'` -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf || exit 1
+fi
+
+if [ ! -f server.p12 ]; then
+  openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` -passout pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` || exit 1
+fi
+
+if [ ! -f server.pem ]; then
+  openssl pkcs12 -in server.p12 -out server.pem -passin pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` -passout pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` || exit 1
+  openssl verify -CAfile ca.pem server.pem || exit 1
+fi
+
+if [ ! -f ca.der ]; then
+  openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der || exit 1
+fi
+
+if [ ! -f client.key ]; then
+  openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
+fi
+
+if [ ! -f client.crt ]; then
+  openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'` -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
+fi
diff --git a/src/test/setup/radius-config/freeradius/certs/ca.cnf b/src/test/setup/radius-config/freeradius/certs/ca.cnf
new file mode 100644
index 0000000..37207e8
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/ca.cnf
@@ -0,0 +1,62 @@
+[ ca ]
+default_ca		= CA_default
+
+[ CA_default ]
+dir			= ./
+certs			= $dir
+crl_dir			= $dir/crl
+database		= $dir/index.txt
+new_certs_dir		= $dir
+certificate		= $dir/ca.pem
+serial			= $dir/serial
+crl			= $dir/crl.pem
+private_key		= $dir/ca.key
+RANDFILE		= $dir/.rand
+name_opt		= ca_default
+cert_opt		= ca_default
+default_days		= 360
+default_crl_days	= 300
+default_md		= sha1
+preserve		= no
+policy			= policy_match
+crlDistributionPoints	= URI:http://www.example.com/example_ca.crl
+
+[ policy_match ]
+countryName		= match
+stateOrProvinceName	= match
+organizationName	= match
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+[ policy_anything ]
+countryName		= optional
+stateOrProvinceName	= optional
+localityName		= optional
+organizationName	= optional
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+[ req ]
+prompt			= no
+distinguished_name	= certificate_authority
+default_bits		= 2048
+input_password		= whatever
+output_password		= whatever
+x509_extensions		= v3_ca
+
+[certificate_authority]
+countryName		= US
+stateOrProvinceName	= CA
+localityName		= Somewhere
+organizationName	= Ciena Inc.
+emailAddress		= admin@ciena.com
+commonName		= "Example Certificate Authority"
+
+[v3_ca]
+subjectKeyIdentifier	= hash
+authorityKeyIdentifier	= keyid:always,issuer:always
+basicConstraints	= CA:true
+crlDistributionPoints	= URI:http://www.example.com/example_ca.crl
+
diff --git a/src/test/setup/radius-config/freeradius/certs/ca.der b/src/test/setup/radius-config/freeradius/certs/ca.der
new file mode 100644
index 0000000..a505cfc
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/ca.der
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs/ca.key b/src/test/setup/radius-config/freeradius/certs/ca.key
new file mode 100644
index 0000000..f6ce685
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/ca.key
@@ -0,0 +1,30 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQInjXhKnIFIgUCAggA
+MBQGCCqGSIb3DQMHBAh+B3FNG/y9LQSCBMhDZu1kDr0UGMnD8mpB7H319e8GLakT
+8jzPI+bxethA4ZthxY3x5Jxcvyhzy92pR7lCG2Sr8HOVhPpxmp3W5HhbrDhdOj+Q
+qy4Y00G2JCos2wVKTecAOgE5a3i2hDVJDsGxqfwqdohBUwhxVwGDxyzelClN3tNW
+xlj6YuPdUly5tmS1Jo0vtw94QtKk1N0JlNxkTz98vfvgxD4vHqMZugfV2EX2P985
+taRb2bX8VE5rh7CkNhYmYxyv5LACM+7IyM4yvUBfqJG0JPg4UKDVya1dm2mHb7+I
+6C7UcOM7phMZeHNT1gixzSl1UFEtBJaPgmxKIIyBUls7bgKOtNf+BNo+OTWfrnH+
+nvMAoEEPr0GT/fD1tpiR4JaRYXMUHrUt8kGw3Kayr2q9HYJuDeb1uwxK/ml+7aGN
+km9IEV/8Cc0/7TGSQR8jqS+evEy9Sv7tiB2rGnwB6hsbvT+l3jQdv/cX00vMPhRA
+g2KBqifiXRnZnYWlY1KAhZZm0BqJuohYko+xJ2yL2I5q8H7UooN+ND8nsaROOELq
+4FdwJd3MaHEgEnyPL5zFrCcv/R/v1GZNJGt0GT2marwPjLTkGcvDZvbX44a/InyV
+IlYZA/JNmWP6NlABhZIf/mzxnOWPjBwqq2y8Wg2PXpYrumj1nIUoO5B96YYo6lvS
+wlAjIYAxLy8IsUMURDfpvm2CCIW0aGTwO8YSyWnxLBa27MG8fWubjJafm+K4SOf3
+uLiBWlbrkCG7jvNL07/mnkjlP4n05Olb5nrpeTKOz3nklbQmsBhC5/OHZj7ZlUul
+gAR6/U3B0sefMsdqxuls3w8qfgrV1oQGATxvWgYs5zFa/bXBSN1L+brc2q+8ZtgR
+GkFIwnXPWiKB7GIlamER24a1nctR4vL+sYmpmlav+OS6n/jItTCYed+dQ5inC3hX
+4rdGiAjylaTDkW7k4dtIXGUJNGZbIxrpAqNYOVYrCyAEj+HdpNuTUUO2vohq+EM1
+og7SeLhsVg1bG3lYRaqZaXjsof2NAruFJ8aH93DcwoClxFjNJxOd9YAXIA83Uvz8
+D2Bu1/Z41Grq8O7YEnrYbxJP77G9PAgCLt2Uc16O91Lpg1gZ3gESX2BmuR38wbyv
+t5MoC1/oSBV+643yq2ldQRYOMSKl/CLoApywcatdHCIiDC3AEIklueG5jA9Diutl
+ZfK8XSpBEYPQm+eHLdfUOTTnF3SoNPDGbm102nKyvgmGpReFgREYyZSwvg/1YuL/
+m8S+lR+gmP3i9Y4/0UcccI24tO5s0FI4od/4BZ4NW9JsYKxCTj/WJCH4bpmjtmwK
+WI1XSxso1ueVQ7qJBVJyEsMa480nJ5GMKoEfzhqzGzvT1awcz5y/Q/4vIjGZVmR2
+GekRkn9uadPQnIsYGX99A5gPAXP+oCJ9MqLXZPdWLXdm0OybAkD++ryKfi3DNYq2
+TO4hcHLi7lEaIgDcOt+RWTkF0y6yZ3vnY6llvQTRF7fe+6R4YJg0On69+Lt6BoZw
+hmgaaR8YJl++eFWzCJjdJJrCPIiQginbGbpks2Zrz5hGGcQhNwomRX5DFVouePK5
+qhd54Myo2di+Fu0Ls86+nFwnIs9s1+c/2rDWzV1aRfEjnv3OUSLi1saoXjiunMBq
+/L4=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs/ca.pem b/src/test/setup/radius-config/freeradius/certs/ca.pem
new file mode 100644
index 0000000..916cdf9
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/ca.pem
@@ -0,0 +1,28 @@
+-----BEGIN CERTIFICATE-----
+MIIEyTCCA7GgAwIBAgIJAM6l2jUG56pLMA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVNvbWV3aGVyZTETMBEGA1UE
+ChMKQ2llbmEgSW5jLjEeMBwGCSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYw
+JAYDVQQDEx1FeGFtcGxlIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0xNjAzMTEx
+ODUzMzVaFw0xNzAzMDYxODUzMzVaMIGLMQswCQYDVQQGEwJVUzELMAkGA1UECBMC
+Q0ExEjAQBgNVBAcTCVNvbWV3aGVyZTETMBEGA1UEChMKQ2llbmEgSW5jLjEeMBwG
+CSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYwJAYDVQQDEx1FeGFtcGxlIENl
+cnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL9Jv54TkqycL3U2Fdd/y5NXdnPVXwAVV3m6I3eIffVCv8eS+mwlbl9dnbjo
+qqlGEgA3sEg5HtnKoW81l3PSyV/YaqzUzbcpDlgWlbNkFQ3nVxh61gSU34Fc4h/W
+plSvCkwGSbV5udLtEe6S9IflP2Fu/eXa9vmUtoPqDk66p9U/nWVf2H1GJy7XanWg
+wke+HpQvbzoSfPJS0e5Rm9KErrzaIkJpqt7soW+OjVJitUax7h45RYY1HHHlbMQ0
+ndWW8UDsCxFQO6d7nsijCzY69Y8HarH4mbVtqhg3KJevxD9UMRy6gdtPMDZLah1c
+LHRu14ucOK4aF8oICOgtcD06auUCAwEAAaOCASwwggEoMB0GA1UdDgQWBBQwEs0m
+c8HARTVp21wtiwgav5biqjCBwAYDVR0jBIG4MIG1gBQwEs0mc8HARTVp21wtiwga
+v5biqqGBkaSBjjCBizELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQH
+EwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5hIEluYy4xHjAcBgkqhkiG9w0BCQEW
+D2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMdRXhhbXBsZSBDZXJ0aWZpY2F0ZSBB
+dXRob3JpdHmCCQDOpdo1BueqSzAMBgNVHRMEBTADAQH/MDYGA1UdHwQvMC0wK6Ap
+oCeGJWh0dHA6Ly93d3cuZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZI
+hvcNAQELBQADggEBAK+fyAFO8CbH35P5mOX+5wf7+AeC+5pwaFcoCV0zlfwniANp
+jISgcIX9rcetLxeYRAO5com3+qLdd9dGVNL0kwufH4QhlSPErG7OLHHAs4JWVhUo
+bH3lK9lgFVlnCDBtQhslzqScR64SCicWcQEjv3ZMZsJwYLvl8unSaKz4+LVPeJ2L
+opCpmZw/V/S2NhBbe3QjTiRPmDev2gbaO4GCfi/6sCDU7UO3o8KryrkeeMIiFIej
+gfwn9fovmpeqCEyupy2JNNUTJibEuFknwx7JAX+htPL27nEgwV1FYtwI3qLiZqkM
+729wo9cFSslJNZBu+GsBP5LszQSuvNTDWytV+qY=
+-----END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs/client.cnf b/src/test/setup/radius-config/freeradius/certs/client.cnf
new file mode 100644
index 0000000..994d3ab
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/client.cnf
@@ -0,0 +1,53 @@
+[ ca ]
+default_ca		= CA_default
+
+[ CA_default ]
+dir			= ./
+certs			= $dir
+crl_dir			= $dir/crl
+database		= $dir/index.txt
+new_certs_dir		= $dir
+certificate		= $dir/ca.pem
+serial			= $dir/serial
+crl			= $dir/crl.pem
+private_key		= $dir/ca.key
+RANDFILE		= $dir/.rand
+name_opt		= ca_default
+cert_opt		= ca_default
+default_days		= 360
+default_crl_days	= 300
+default_md		= sha1
+preserve		= no
+policy			= policy_match
+
+[ policy_match ]
+countryName		= match
+stateOrProvinceName	= match
+organizationName	= match
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+[ policy_anything ]
+countryName		= optional
+stateOrProvinceName	= optional
+localityName		= optional
+organizationName	= optional
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+[ req ]
+prompt			= no
+distinguished_name	= client
+default_bits		= 2048
+input_password		= whatever
+output_password		= whatever
+
+[client]
+countryName		= US
+stateOrProvinceName	= CA
+localityName		= Somewhere
+organizationName	= Ciena Inc.
+emailAddress		= user@ciena.com
+commonName		= user@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs/client.crt b/src/test/setup/radius-config/freeradius/certs/client.crt
new file mode 100644
index 0000000..1197fec
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/client.crt
@@ -0,0 +1,80 @@
+Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number: 2 (0x2)
+    Signature Algorithm: sha1WithRSAEncryption
+        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
+        Validity
+            Not Before: Mar 11 18:53:36 2016 GMT
+            Not After : Mar  6 18:53:36 2017 GMT
+        Subject: C=US, ST=CA, O=Ciena Inc., CN=user@ciena.com/emailAddress=user@ciena.com
+        Subject Public Key Info:
+            Public Key Algorithm: rsaEncryption
+                Public-Key: (2048 bit)
+                Modulus:
+                    00:ec:5e:99:c0:6c:3e:7f:6d:66:c0:9a:e6:8d:89:
+                    03:ab:10:0c:2e:c0:e8:98:02:35:75:e5:d9:51:73:
+                    28:b3:4d:bc:1f:4f:4b:a0:fb:64:1b:10:e4:09:f4:
+                    cc:40:cc:37:38:b9:d6:ae:e5:9e:b6:20:d5:7c:a2:
+                    13:84:bc:17:33:06:00:5a:fd:e1:19:46:31:02:54:
+                    6d:10:fd:57:fa:2a:b3:33:17:e2:4f:be:88:03:8a:
+                    b4:80:35:82:bd:f1:ed:98:be:d2:d9:23:55:25:73:
+                    5d:39:a0:36:78:42:84:06:a6:74:cb:23:61:41:b9:
+                    f8:26:3d:58:08:9c:5f:2c:be:54:45:77:cd:3a:61:
+                    65:90:d1:3a:37:23:12:9e:26:fd:34:97:54:f6:0f:
+                    81:80:d7:23:8d:18:64:a5:f9:05:db:ea:ca:45:ad:
+                    4f:fb:48:81:96:f8:f1:14:b5:34:fc:8d:fd:79:02:
+                    63:39:77:6e:fd:b5:ab:1e:cc:73:47:dc:11:bb:09:
+                    04:82:11:61:35:24:7f:19:ec:8a:57:27:98:bc:52:
+                    60:ef:a9:f8:36:7d:b8:12:c1:cd:c4:2d:fb:84:5d:
+                    e6:92:d2:7e:2d:b5:58:cd:fd:d9:9d:a8:3a:2d:ef:
+                    b3:f3:98:00:f0:2a:82:68:b6:25:63:af:de:67:8f:
+                    6b:ff
+                Exponent: 65537 (0x10001)
+        X509v3 extensions:
+            X509v3 Extended Key Usage: 
+                TLS Web Client Authentication
+            X509v3 CRL Distribution Points: 
+
+                Full Name:
+                  URI:http://www.example.com/example_ca.crl
+
+    Signature Algorithm: sha1WithRSAEncryption
+         2c:1c:cc:3c:34:c8:07:ab:0b:c8:f9:74:4f:a2:55:32:33:a4:
+         ca:57:40:56:15:d6:89:0b:13:8d:a1:90:18:95:14:7b:57:26:
+         50:9c:99:6d:46:44:73:5d:cc:ca:05:cc:3c:e3:38:c7:bc:fa:
+         56:95:c6:ba:ad:5c:fd:5c:67:57:34:36:7c:d9:db:25:c2:00:
+         1e:2f:fb:1b:c5:b9:fd:24:1f:3d:eb:08:54:3b:07:4c:47:38:
+         66:ca:4f:8b:98:e5:4b:f3:15:5f:71:ce:0b:e0:43:6c:e8:dd:
+         6f:0a:8f:45:7d:09:12:bf:ae:3b:28:63:3b:e9:51:c4:6f:22:
+         94:c4:40:0a:80:54:6f:0d:5e:0e:e5:43:a0:40:60:12:b4:94:
+         0b:8e:29:ab:98:a8:0f:0d:b1:7a:57:3e:63:a8:50:76:6b:58:
+         c1:f6:34:0d:bb:f0:c4:7b:40:e3:de:5f:ac:bc:8f:71:ab:2d:
+         0e:24:ff:ce:b7:bb:34:be:75:33:25:03:3e:38:d8:8e:08:4d:
+         40:4c:2f:bb:ae:88:29:b4:37:4f:5b:49:06:b8:08:ef:f5:88:
+         f9:da:a1:28:11:68:94:a1:8a:4c:35:88:1e:c6:57:42:f6:75:
+         b2:71:ae:fc:54:58:ce:0d:65:f9:1f:e3:4f:c7:11:07:d0:43:
+         c2:15:2d:ca
+-----BEGIN CERTIFICATE-----
+MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
+MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
+5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
+tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
+OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
+qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
+2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
+eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
+MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
+VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
+RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
+dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
+T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
+yg==
+-----END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs/client.csr b/src/test/setup/radius-config/freeradius/certs/client.csr
new file mode 100644
index 0000000..8f8a518
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/client.csr
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICwDCCAagCAQAwezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQH
+EwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5hIEluYy4xHTAbBgkqhkiG9w0BCQEW
+DnVzZXJAY2llbmEuY29tMRcwFQYDVQQDFA51c2VyQGNpZW5hLmNvbTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgC
+NXXl2VFzKLNNvB9PS6D7ZBsQ5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlG
+MQJUbRD9V/oqszMX4k++iAOKtIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5
++CY9WAicXyy+VEV3zTphZZDROjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tI
+gZb48RS1NPyN/XkCYzl3bv21qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9
+uBLBzcQt+4Rd5pLSfi21WM392Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaAA
+MA0GCSqGSIb3DQEBCwUAA4IBAQB030zqg/C6+0rwf+nsdQJvpUGFVCT3WJRf7Qx5
+NC3n6hfetLHs7XjPZ77CI2B1VEPE7r55Mv1m81b1+2WO/jFQXlM52CteOSLy/Zsj
+lUBW4naaCa+C3liOn1cSONNClvKMGl2DcTbOFO8j9A3dAOHUR05SeAtGutVip9CS
+NPl36MmwFUO0p25UkmG4IJIZPVaMEjqEPVjWxnRFrajFwsbyMkHEFIEvQ/TP1qpN
+LzLmp+Y4flS4O7zC3AAt4Zayr4AC5cf4JKDJxxfZ+qE0KS7jV4bJdo5hxpGz4ECC
+/LDZPZN9oGr67bNSjM4+Ogdx5v4Huojn/lQPK4gTME8SIUPX
+-----END CERTIFICATE REQUEST-----
diff --git a/src/test/setup/radius-config/freeradius/certs/client.key b/src/test/setup/radius-config/freeradius/certs/client.key
new file mode 100644
index 0000000..a2e92c3
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/client.key
@@ -0,0 +1,30 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIY2/Sy6WJDBYCAggA
+MBQGCCqGSIb3DQMHBAgzT2/EDIfXJASCBMjE2epab8bFXVgs1E2P02G/LzexUvvO
+gH9GltVbzVSmsuWNKaGBqWvRNiKrIiGIBQpNZsdV/0ae/5Etw3qkvcO/vTiPAOgC
+3+vkaNZMIpI4jePvvCzeUlnybg4+WBq6aXZLRQc8uCWkApH/HfcnwxCKEKebPqj5
+F1xmzT8WOSRJYytrwzU7GWQtsDwFCv0KnTJeYni9CVLIX8jFLtMB1mRhYZ93eSiM
+DjsSr2OH/AOiZQEzCv5YMbDk4WD9L1MD5S62bpWxdwG/aEr8E1dI1Z2TJHzx75dR
+lWdoV1BQHfKmsQRtwnZ/Hq6zmzY+SStJGFUcRdBdLdJrfRcIyTJXVkFYoVMM/PDl
+UT4K0pIcDILH7jPNp7kuDfSDigFNvqk4O6GybN+TT7cQKH5oGtEsvGSOfUYZUEvp
+KV4rpyR+n3NPC1tEoOvfuGlqHDGN62pdTVhFM/FqFbZSEoTzlSU+OecLiQrGtS6T
+hrmWt/go20MxTlWh87L0s9SRalP4A2YkufHBst8oSgwI4DzVhifqqWD87w7iL0Ur
+6drgbtlM2hY3onkwS2+oSzEIIlwLwaBaAt2hnVosgZIQajcmlayIhRQ1SNsYYj2T
+YTTTYxPWwUaIYzOl7Ri1OoD5dSFY84sUAD7odLMpzmEJQIi31KYIdOs1BN5oDpFV
+GbcKtF7sKw2QBb8nZgADobpCHIJIG/SLNqx4UgSZYgLVUgW0xaS8+8ylVLqRkIjM
+yoGkxqezc2pvCAbH8BMGYaZei5TL9GHanae+t6caBK9Zty6m9bdT9H9EkC6NEWhX
+IuKGZjyq/+O1mFK/66ts+tq9mynyZfVxxAKINijGLEWKPL0KAZkZIfFnCfXO7kK/
+JJNp5zE8GX9FFT5w8sq2UTsfS/F6K8kih+gZVJtj4irnWiABLq4VQjBRPeJJFt5Q
+Zki48dH5JP1/0222mka+ynRfv6pAtSN1Y5vx2mDPNoxiajhfMoLAxFkwwUYA3AfI
+DMTByk7n27HfWtmkUV+Zx263NVkCU0/BjOE3j7N1OojSuCizJRIT199hRhmnTFoy
+FPRrmYF4g/HU1ca6u8If5JzZAfJIqN8H9oHHTdWg5HuU31NpQPHgQqRGvaaBpuwc
+oglzg6mhl/4dUlZiw6l7bJGMojid24iTMgt6FkUqma1ECQ2wp3SF06u7iFecB78B
+aKJhOGOF1GHc0DMHNwLfSw1wIBah5K1SFm4JQyEYrG/KeRGXRKKGlKPGTKQPQRCU
+LCqbWnFMPBW5b/V/Xv02BBemgwp44RsFDQo6AVp6zbzWwh84oLrFSuGMK7aGynGA
+/MzGAmrC5jbIC62WAejlSj51o6sHoQNjn59PULZWqsbfD0DWH1DXeGqzLNd0phx7
+v1yDjLVq7J64YNYtxctZ+G54Pkg5wHTyx+dt3gKi/wVSc/cOHNDC2QxWhvSxL0cp
+/QpgggxaADcPZkvQe2/34wbqBTYbz9j+PODuad8hrqSLMLzX5iIFqE8qOYNPwH8z
+Lz66G4k3kp307/+0pocIRASn9dtX0PgpGyKo1hvg3zYNP+ObRPOT06Zx1HhEIx2S
+7oQXaQNDJpZd5tO+s7RY80ficybUe7wC4BnqNaoxVluBaIEA2NdiPHOiL5Sh0sme
+0oI=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs/client.p12 b/src/test/setup/radius-config/freeradius/certs/client.p12
new file mode 100644
index 0000000..d1289a9
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/client.p12
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs/client.pem b/src/test/setup/radius-config/freeradius/certs/client.pem
new file mode 100644
index 0000000..6dc7d9b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/client.pem
@@ -0,0 +1,60 @@
+Bag Attributes
+    localKeyID: 8F EF 1A 9E BD 4E 8D A1 8E 01 DF AF D0 5A 74 29 F5 76 99 DB 
+subject=/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
+issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
+-----BEGIN CERTIFICATE-----
+MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
+MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
+5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
+tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
+OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
+qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
+2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
+eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
+MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
+VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
+RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
+dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
+T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
+yg==
+-----END CERTIFICATE-----
+Bag Attributes
+    localKeyID: 8F EF 1A 9E BD 4E 8D A1 8E 01 DF AF D0 5A 74 29 F5 76 99 DB 
+Key Attributes: <No Attributes>
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQI6qtqDG5BZo0CAggA
+MBQGCCqGSIb3DQMHBAi1fAR8FIi0cgSCBMjz9+X+jT+0Bop2xt6SN0SHb/zqlDKP
+Ca1mvOp0+hbID9/lZ4uh71QhxwFnEp/Vnoz2Shxavb75SQKNBVWuazsiJogrFfTj
+d/3PLlTpyIheFLpBkh95Gt4103ON4wPDh2g6j8hpEJoBy4qxb4C8FrWJ05LYjONI
+Uz+zlAn6v2pWkep/kLijb9hCyBjlVRECdArwL/Y7KzFwiGGJX+FtDABrZKfHZw7l
+ovgPQE/7bSG8/8clJ+1IM1AWTFqxpvnj66lWh/DMfoAvNKH45lpGvkbNy9UHIJsu
+1fYyeGibGSisGuLCvsojt/Idmz2O/zU1suv8/ZVV3vRWe+FpUUZqC1rVTKA+BVyy
+vWiFi3u0tezWNiey0y7ocFxIv5+PDH87cG54xRfwQIgDB4QAdcaJpmA4pkU7puhS
+gv54wcAxLRhQGy4lVOiqBkIVzux0m12L/Jh6Ctl8EfYMrrdQeBZMAVtC/qLgv2O4
+cJWVzD69lveMNOekLswaOzKVxwpe2jFeWf9TsDpV1+r+sYcT3b9xHmo7gHLH/Vu5
+RcIRD6QFBKyt8lvqNUSysDA8GnRcw/AInrfaLz7Nly768gkSjMLd1EByCraX8GaD
+8RPl8thtjqbfoC/j2+UjYQw79QnxOia2K0Ft3aXh40YTMQqXDZuoPR3ajaS4Fyz5
+nxnctUtk0i85p8Ge4V3pMZCC6EZdTrzgKv8GvFJLzmy7vfTbJBapK8LlwUJMWyyv
+aED55kkv2lhjPT/Qy4iz5j9/Q5DgEENEB42x0SDK8fjTGZ8vrIEp7kzPMYuqpFZ8
+XFiwHB4brrpq7gxyedG7FbzWqO+t3xSvgXRYDj7WKMUPZXw7QGvGNci2U7GiPVM1
+vPCpIllgh2ZJ9p1vvf44o6Aoh2i4Hkl24zRTSV5L4c5yu8Eeckj6lJubTSlrfa2d
+aEIyO6SK3jgXJKXl70Xv4jAe5M5JDkK85k9HAXQryceWi8dpuHfNb9akFX8vMymh
+QCd2QzROyAytHGAPgH/55hHIg93ORkpMA9GzsutH9+u14uziSpbr3B3j8uOgKTnN
+U+rrEKP9Wm23efJ/X5IzEfFPeGqG4dZDOn9US/WkwiTNFmy834T89DRT609ckSVD
+AhAlQVOXLE2gp/0i+b2vdT1Br+PRqQOXh0sv9x6uL56tvuUG3nnOWGZSljZmEm5p
+Qa8JSjY3ZDn6KC1FJyk4DTf7U38zTi2Z/+AL8K680zEJ6Uc9HI3wVdd19Cf84RdQ
+T6fD/LkvBAWTHzVxMrxYVPqE/3Sf1AJX0cmzciZhxrCml4RQZ78i98/yJaxo7ql7
+0Q34+KaAXHP1QghjmXBERBcyHgNeV/jUUXsxZA/MCkn7PvtCuHkNqc2ZlmN0hLwz
+ObHOo826krBRSlpn7P3DreEjSxzCxPpUXAn6f6bXhtVVIiy6ITkWWfVoLD1/crTc
+sRZMi/EKkmgfIzgCYt3JzAdoii3D7ebxcvfVThbDguOE78OhKY3n4wyJ+FpUn56j
+VcX6Ckl2hAE9qwRNOi383+35A0iUb+vF7ky3K/xMONog+dwdTRkhDbz6rTlO72Tj
+B9xcVJAbopB90NNfrG7LRD472O+t+53C27tG1kyIOaCvXPmeFwgTcLLVjDAyFzXb
+uGo=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs/dh b/src/test/setup/radius-config/freeradius/certs/dh
new file mode 100644
index 0000000..e7b4f90
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/dh
@@ -0,0 +1,5 @@
+-----BEGIN DH PARAMETERS-----
+MIGHAoGBAKHERxCGYaLWD6ay09DuGxxs5whd4zFUS1pjA7jEvGwnbISSzGvzRbYi
+ymNeNgzrZhHiWo5GC008yLvUy0qxVMny0x+7xybup+mOv6ITEz+HuhlsBN+Aqc5P
+Oyq7h1qnuy8UiiEP87YcwhCFooQ3I8dCcMT7AVApYex4K81Sck/LAgEC
+-----END DH PARAMETERS-----
diff --git a/src/test/setup/radius-config/freeradius/certs/fef12f18.0 b/src/test/setup/radius-config/freeradius/certs/fef12f18.0
new file mode 120000
index 0000000..e375f5a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/fef12f18.0
@@ -0,0 +1 @@
+ca.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/index.txt b/src/test/setup/radius-config/freeradius/certs/index.txt
new file mode 100644
index 0000000..27c2c7a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/index.txt
@@ -0,0 +1,2 @@
+V	170306185335Z		01	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
+V	170306185336Z		02	unknown	/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs/index.txt.attr b/src/test/setup/radius-config/freeradius/certs/index.txt.attr
new file mode 100644
index 0000000..8f7e63a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/index.txt.attr
@@ -0,0 +1 @@
+unique_subject = yes
diff --git a/src/test/setup/radius-config/freeradius/certs/index.txt.attr.old b/src/test/setup/radius-config/freeradius/certs/index.txt.attr.old
new file mode 100644
index 0000000..8f7e63a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/index.txt.attr.old
@@ -0,0 +1 @@
+unique_subject = yes
diff --git a/src/test/setup/radius-config/freeradius/certs/index.txt.old b/src/test/setup/radius-config/freeradius/certs/index.txt.old
new file mode 100644
index 0000000..f0ce0ce
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/index.txt.old
@@ -0,0 +1 @@
+V	170306185335Z		01	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs/random b/src/test/setup/radius-config/freeradius/certs/random
new file mode 120000
index 0000000..a222f14
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/random
@@ -0,0 +1 @@
+/dev/urandom
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs/serial b/src/test/setup/radius-config/freeradius/certs/serial
new file mode 100644
index 0000000..75016ea
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/serial
@@ -0,0 +1 @@
+03
diff --git a/src/test/setup/radius-config/freeradius/certs/serial.old b/src/test/setup/radius-config/freeradius/certs/serial.old
new file mode 100644
index 0000000..9e22bcb
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/serial.old
@@ -0,0 +1 @@
+02
diff --git a/src/test/setup/radius-config/freeradius/certs/server.cnf b/src/test/setup/radius-config/freeradius/certs/server.cnf
new file mode 100644
index 0000000..444372d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/server.cnf
@@ -0,0 +1,54 @@
+[ ca ]
+default_ca		= CA_default
+
+[ CA_default ]
+dir			= ./
+certs			= $dir
+crl_dir			= $dir/crl
+database		= $dir/index.txt
+new_certs_dir		= $dir
+certificate		= $dir/server.pem
+serial			= $dir/serial
+crl			= $dir/crl.pem
+private_key		= $dir/server.key
+RANDFILE		= $dir/.rand
+name_opt		= ca_default
+cert_opt		= ca_default
+default_days		= 360
+default_crl_days	= 300
+default_md		= sha1
+preserve		= no
+policy			= policy_match
+
+[ policy_match ]
+countryName		= match
+stateOrProvinceName	= match
+organizationName	= match
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+[ policy_anything ]
+countryName		= optional
+stateOrProvinceName	= optional
+localityName		= optional
+organizationName	= optional
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+[ req ]
+prompt			= no
+distinguished_name	= server
+default_bits		= 2048
+input_password		= whatever
+output_password		= whatever
+
+[server]
+countryName		= US
+stateOrProvinceName	= CA
+localityName		= Somewhere
+organizationName	= Ciena Inc.
+emailAddress		= admin@ciena.com
+commonName		= "Example Server Certificate"
+
diff --git a/src/test/setup/radius-config/freeradius/certs/server.crt b/src/test/setup/radius-config/freeradius/certs/server.crt
new file mode 100644
index 0000000..246df1b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/server.crt
@@ -0,0 +1,80 @@
+Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number: 1 (0x1)
+    Signature Algorithm: sha1WithRSAEncryption
+        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
+        Validity
+            Not Before: Mar 11 18:53:35 2016 GMT
+            Not After : Mar  6 18:53:35 2017 GMT
+        Subject: C=US, ST=CA, O=Ciena Inc., CN=Example Server Certificate/emailAddress=admin@ciena.com
+        Subject Public Key Info:
+            Public Key Algorithm: rsaEncryption
+                Public-Key: (2048 bit)
+                Modulus:
+                    00:a7:9b:3d:b2:8f:6d:dd:55:c7:34:5a:8b:c7:78:
+                    a8:ff:14:fa:21:0e:60:1b:0c:87:36:f1:07:3a:cc:
+                    f1:8a:9d:23:4c:31:8d:81:92:0b:1f:b2:f9:6f:55:
+                    79:c3:fd:18:8f:99:a7:8b:8c:41:18:a6:02:08:cb:
+                    b5:5b:8b:b7:23:a3:6d:20:a9:ec:ee:bf:fa:f1:99:
+                    d7:07:35:a1:3b:e9:5e:b7:84:8a:db:5d:46:15:3e:
+                    1f:92:2d:12:db:4c:c3:aa:13:c7:dd:2d:a0:0a:d2:
+                    3c:59:19:fa:7c:d9:a5:b4:16:bd:82:ba:35:47:c4:
+                    dc:fb:af:61:f1:70:d8:b3:2c:ef:91:20:c5:d5:af:
+                    b7:ac:5d:15:4e:ea:64:ab:0b:b3:ee:25:7e:aa:a8:
+                    a0:a5:36:2e:59:ed:b8:c7:02:4f:ab:9b:e7:50:4c:
+                    30:14:4d:48:1a:a2:88:05:6e:7e:82:ef:f8:c5:70:
+                    b5:d8:3e:ae:f6:e0:2e:68:ba:52:d3:e5:3a:2d:0f:
+                    dd:43:86:39:b5:af:5b:c3:86:7b:98:78:7f:d5:9b:
+                    ee:9f:e4:50:5e:03:9e:29:67:f5:78:35:b1:d3:e2:
+                    66:2d:68:36:c2:30:c9:06:c2:1c:73:9b:c3:09:28:
+                    ba:08:b8:f5:49:e0:5b:d1:43:d9:38:06:47:32:a2:
+                    de:a9
+                Exponent: 65537 (0x10001)
+        X509v3 extensions:
+            X509v3 Extended Key Usage: 
+                TLS Web Server Authentication
+            X509v3 CRL Distribution Points: 
+
+                Full Name:
+                  URI:http://www.example.com/example_ca.crl
+
+    Signature Algorithm: sha1WithRSAEncryption
+         58:cd:50:ef:5f:b1:3e:34:a2:5d:f4:59:f6:11:25:be:de:b2:
+         f8:58:81:2a:89:f0:e7:df:36:88:49:8e:d2:c4:44:22:6c:40:
+         a4:13:6c:8c:15:9d:f2:9e:32:29:ff:31:f0:82:92:3e:93:58:
+         ce:eb:da:9b:19:76:7d:de:54:c5:b1:b2:2e:66:4c:7b:7c:9c:
+         98:12:f9:20:aa:d3:c7:d3:0b:70:5a:c3:24:d7:b9:92:8e:38:
+         fe:54:21:c6:c9:e4:c8:b8:b6:ae:8a:0e:2d:18:95:53:da:b6:
+         9b:94:12:0d:68:e8:ef:0a:78:8b:29:cc:0f:59:a4:d8:dc:6c:
+         34:b2:7a:6f:de:63:1e:e1:03:d2:f3:ca:b6:26:05:f4:22:51:
+         2a:ff:78:d1:07:b3:e7:7e:ab:68:33:2a:0a:d0:cc:be:26:ea:
+         6a:6e:10:d7:2b:7a:7b:cb:e6:0d:50:66:7f:9a:33:31:ad:8d:
+         1b:3b:3f:8e:74:29:3d:07:37:9d:4d:29:ad:b6:cc:84:d8:1c:
+         09:48:61:ce:67:30:ee:74:25:fe:23:5a:8d:00:f6:1a:5d:de:
+         04:70:a7:ea:e9:6f:b0:25:10:f4:3a:70:ab:5a:57:5f:53:12:
+         d8:0e:52:f4:f2:f5:dc:25:71:e5:46:24:09:65:95:22:1b:35:
+         8b:78:9b:6f
+-----BEGIN CERTIFICATE-----
+MIIDyjCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM1WhcN
+MTcwMzA2MTg1MzM1WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
+YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCnmz2yj23dVcc0WovHeKj/FPohDmAbDIc28Qc6
+zPGKnSNMMY2BkgsfsvlvVXnD/RiPmaeLjEEYpgIIy7Vbi7cjo20gqezuv/rxmdcH
+NaE76V63hIrbXUYVPh+SLRLbTMOqE8fdLaAK0jxZGfp82aW0Fr2CujVHxNz7r2Hx
+cNizLO+RIMXVr7esXRVO6mSrC7PuJX6qqKClNi5Z7bjHAk+rm+dQTDAUTUgaoogF
+bn6C7/jFcLXYPq724C5oulLT5TotD91Dhjm1r1vDhnuYeH/Vm+6f5FBeA54pZ/V4
+NbHT4mYtaDbCMMkGwhxzm8MJKLoIuPVJ4FvRQ9k4Bkcyot6pAgMBAAGjTzBNMBMG
+A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
+ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAFjN
+UO9fsT40ol30WfYRJb7esvhYgSqJ8OffNohJjtLERCJsQKQTbIwVnfKeMin/MfCC
+kj6TWM7r2psZdn3eVMWxsi5mTHt8nJgS+SCq08fTC3BawyTXuZKOOP5UIcbJ5Mi4
+tq6KDi0YlVPatpuUEg1o6O8KeIspzA9ZpNjcbDSyem/eYx7hA9LzyrYmBfQiUSr/
+eNEHs+d+q2gzKgrQzL4m6mpuENcrenvL5g1QZn+aMzGtjRs7P450KT0HN51NKa22
+zITYHAlIYc5nMO50Jf4jWo0A9hpd3gRwp+rpb7AlEPQ6cKtaV19TEtgOUvTy9dwl
+ceVGJAlllSIbNYt4m28=
+-----END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs/server.csr b/src/test/setup/radius-config/freeradius/certs/server.csr
new file mode 100644
index 0000000..d055b9e
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/server.csr
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICzjCCAbYCAQAwgYgxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UE
+BxMJU29tZXdoZXJlMRMwEQYDVQQKEwpDaWVuYSBJbmMuMR4wHAYJKoZIhvcNAQkB
+Fg9hZG1pbkBjaWVuYS5jb20xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRp
+ZmljYXRlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp5s9so9t3VXH
+NFqLx3io/xT6IQ5gGwyHNvEHOszxip0jTDGNgZILH7L5b1V5w/0Yj5mni4xBGKYC
+CMu1W4u3I6NtIKns7r/68ZnXBzWhO+let4SK211GFT4fki0S20zDqhPH3S2gCtI8
+WRn6fNmltBa9gro1R8Tc+69h8XDYsyzvkSDF1a+3rF0VTupkqwuz7iV+qqigpTYu
+We24xwJPq5vnUEwwFE1IGqKIBW5+gu/4xXC12D6u9uAuaLpS0+U6LQ/dQ4Y5ta9b
+w4Z7mHh/1Zvun+RQXgOeKWf1eDWx0+JmLWg2wjDJBsIcc5vDCSi6CLj1SeBb0UPZ
+OAZHMqLeqQIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAI4JSSggafFTzjYU4n9s
+lKYCCy8+MQ2X9eEKcsBwDiPvXmJdeWJTFYfBHE5p6spyA3IULxb9N90Kajdf287L
+e1Gurh4XuLd2gv/UAR4fpYJ6r0jJviWAe77R1cuJ+QvQWOaSWzJmxtZkO7OdBl0A
+XmksiRLnKu0mGEcGSQQ3vfdwDTGqpfLuSn9U6B8FoK7AjkeB1oKf6UgHnbN01UKp
+ubExjX4lNHLLiNrXjBkyDpW5zBbbhEaPdnDLHvNnd6fTkd7F3Jt5timmrm9hKMMB
+hE7qLyiBoSdqFejZEPjcvJGV42sNetREqIrWnvsXrox+7P+5z8+uowebLXGohfJC
+hmc=
+-----END CERTIFICATE REQUEST-----
diff --git a/src/test/setup/radius-config/freeradius/certs/server.key b/src/test/setup/radius-config/freeradius/certs/server.key
new file mode 100644
index 0000000..63be1bd
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/server.key
@@ -0,0 +1,30 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQI9q05vTmS4WYCAggA
+MBQGCCqGSIb3DQMHBAi4iVH6BL89ZQSCBMiK+P9gWMj1xFZqEQZ/VUTB0bt6YM8q
+nc1otC9KdWVCl5zqdV3vC0BdLMGv3Xem+u/ycWyrSsqDD3RzFqIjIxJTsAQRMKwr
+Fu4sNBMyAh0TCzVtf73QjiRg8Jtkf4UbTfJzNo1J3rjglnkSJ+9rCAYT4Ei84opN
+T/pdlhw9uRRsz7v+HRzajcpyw6FxtjLOUI2PaG8Lyrlrgt6uP1PvurK2+jexZ8o6
+OWIo5kbrn/rpzTiHWNgRoWnT71J5/lXE8hkjtv/5WAuncPAaUVdo0nKg58RD66St
+MOfQKlISeOdNw0yUWNPKkr98Tnp+fSUFHV4NCpMoV7mgab16grd8XR4qnOYuq8Ay
+9m0kzvffeASJj9hmpRDrZGrPXijNCRtEE8WQv3tLAYRaH180m6qCr7cOCS89LZZ4
+sVEIiAsOgCuAX3E3PGrdFbsGR6MnRpoHNxtUkD5g/b//8HTJ7b0EMKp00VTuHQRH
+JxxTZnbPSmsHJ+RmKL1K3eHqCDXuTPVFdDh82mabd/EiSdfj13+8etMQrF62XhDw
+r/2ElsO1yIPkXg9+FuC67EIBkYEbpuCXkvqYeuYEskEtoSDCj5yoX/aNJUkVImA3
+zveRCH8GMD0kaIf9IQdQ1jJxUGc3ZWFo6MIFAUD5eGXfwWX1x11sFJP2uBdf+31A
+0GhFICUaziHcDrHtqp5/nzo8f0hh+y3zXLx/Mf+WMC0Nirh7nyMoEmeNufYZtdvI
+5u90rYiPr7yS8vQD1R0LQZnODmtx0akn9HAtFvGzFbfa6x+2RoPpDiKS43ZCQPeW
+8JhWakNKijzfl2vufVUtSDZ5cPg5oyTH2NMw+DAgxqowtPmYV9J+ecZ9akwKk1Uz
+cLpNPrDmdUCyfztU5tlfTIdduafj9eIIgvVZs9wajlEWvooMW7cwbKYA0I5wYdq2
+lqFvnJtngUuvykYPFLg/ME+bXbdmQ6M91HpxOqUKp1feX4TW6yDlStpA40vPO3iB
+HmfL1DW3O4JTmvBwdoLPYoL5vP3/st51vXMXUcnyjHAzCa4HXj80PWyBsCM6S/iT
+SJtieMXSLw7R30D5boXncQS/fBCsdJpEpz2GyjJUn2RLbYJ3OsQbXB0eCaL7y9LL
+hGVK5Ez/HWjZ7Q6WRotVjeO5yRIgzWe4VRV58CVOH2CIkf1ODolzhREyzSBCGD6Q
+5rOZSAd21aStrNWQ02nYPXZbcnTo1LQImonSQ4SJZg0lsRSHfahmXkKafyYg5U8E
+jiff1uzSWWtmSZkY46S4dzQOZsY97k8cChliSnY1Jk8mh/5D9ehLxalUNMv0DIN/
+yTgYmC1TasTdchkSZdEyli8xvGWcmMKC+A5ycfRyE2mPxuEL6nQq4MAH7Yie9g7T
+Fzamniy0SXT08yXu2oFhi7VLyxSbGvIBQqE06rh2NVgt+N1eRSa/SJlkB6iqEmEA
+X+4b3D3s+ST6bZ19b6PP1t4tbfpGZ3LGezndpY4GqgfsUi5hdQcdfRjknCyFRZKm
+Qqi43ojk1xsdUHt/q0Y4RFHMtR5oQTapRXybQBRbzS7KCiRsH356ACowvV0UCNg2
+WzfFm3uozQO6NJCfWePdkfVrxU0p4q9s9QxxDX5SApQpqcwt0rJiDOzXvxKH8jx6
+qHo=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs/server.p12 b/src/test/setup/radius-config/freeradius/certs/server.p12
new file mode 100644
index 0000000..352d346
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/server.p12
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs/server.pem b/src/test/setup/radius-config/freeradius/certs/server.pem
new file mode 100644
index 0000000..b8b70f5
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/server.pem
@@ -0,0 +1,60 @@
+Bag Attributes
+    localKeyID: 59 6E 27 68 23 9E 59 B9 D8 DD B4 FC 7B 78 89 F7 50 02 A7 A7 
+subject=/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
+issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
+-----BEGIN CERTIFICATE-----
+MIIDyjCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM1WhcN
+MTcwMzA2MTg1MzM1WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
+YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCnmz2yj23dVcc0WovHeKj/FPohDmAbDIc28Qc6
+zPGKnSNMMY2BkgsfsvlvVXnD/RiPmaeLjEEYpgIIy7Vbi7cjo20gqezuv/rxmdcH
+NaE76V63hIrbXUYVPh+SLRLbTMOqE8fdLaAK0jxZGfp82aW0Fr2CujVHxNz7r2Hx
+cNizLO+RIMXVr7esXRVO6mSrC7PuJX6qqKClNi5Z7bjHAk+rm+dQTDAUTUgaoogF
+bn6C7/jFcLXYPq724C5oulLT5TotD91Dhjm1r1vDhnuYeH/Vm+6f5FBeA54pZ/V4
+NbHT4mYtaDbCMMkGwhxzm8MJKLoIuPVJ4FvRQ9k4Bkcyot6pAgMBAAGjTzBNMBMG
+A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
+ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAFjN
+UO9fsT40ol30WfYRJb7esvhYgSqJ8OffNohJjtLERCJsQKQTbIwVnfKeMin/MfCC
+kj6TWM7r2psZdn3eVMWxsi5mTHt8nJgS+SCq08fTC3BawyTXuZKOOP5UIcbJ5Mi4
+tq6KDi0YlVPatpuUEg1o6O8KeIspzA9ZpNjcbDSyem/eYx7hA9LzyrYmBfQiUSr/
+eNEHs+d+q2gzKgrQzL4m6mpuENcrenvL5g1QZn+aMzGtjRs7P450KT0HN51NKa22
+zITYHAlIYc5nMO50Jf4jWo0A9hpd3gRwp+rpb7AlEPQ6cKtaV19TEtgOUvTy9dwl
+ceVGJAlllSIbNYt4m28=
+-----END CERTIFICATE-----
+Bag Attributes
+    localKeyID: 59 6E 27 68 23 9E 59 B9 D8 DD B4 FC 7B 78 89 F7 50 02 A7 A7 
+Key Attributes: <No Attributes>
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIQUZafKqU+44CAggA
+MBQGCCqGSIb3DQMHBAhxfosFDCxaJwSCBMhbvtkYjR1vowEixVQS2J5vL4EhHv7x
+ImI1vnLIbjY6HmUGuOolLidZQ7ieG1hrElat3gPCgw7zfmZyS3DOnLTxBAZRlOxK
+3zkBnegVGNbOnnsLJJX52JSDJvGnlUPWg3r8UhFp3aPH1eqUyt0bTySgpsSdt3yD
+/oWymM1bQW65KfKW3cskR+oKyqjh4rQevyTf5dR2r4noVfR96RqdJWJ95ag40GXN
+gpLDBTZD+iuMN1PiH9CraJWbRIWQOM0ieC79wGZ57V5tzgHENNey2itgwJ93iCj0
+Ay4f0HUEOuqJ7kK1fYEo+MUBt5TzpLBygGIVgbusz57C6DgCHwhBFtLS952GkmEP
+CAKM9I7wWR3w0Mj5maz4kq2hSzou1j81+ivxSkXMEGsCfwbrjY1QIitZdeEu31ti
+uf9+Jx2tK2yIu7+MLnMnpB7vdXrrPT6wipGMBe8a1/sczE2/foW0e2VarQIuS8bt
+fVpnfXT91Mf0DVn6Bc+ZI3EMG555Ah7GqbVztAlRm6IpbpFyFixx8m6oBwYc/ik6
+fReFzBOq+hV9VPAwYkzGlR+6hhfxhCUyE89LmB2z+fJvEMRj+X8LG21bHTkJoymp
+E/a4NIvOZv1vE3PpK7quZDm9HT/hdTsXbqlfbIBPcpJyUSbTcdBX2jcXfTz0od8Z
+e1iNlQ93d8FHuZFbtYiiZRSWGHPXI3sc96qY12cbUftZy20eN2esn37l13mDi0uS
+Qn0lAQFQwnEF4RROSSoLJefXc9kNXxq2cgZ/rWuUerwQQfMWU5tPwDS5UEoJjQg3
+eK2GH8YMoUuS178X9IU8cXD6vFkSOQ4uZ7L8sY7YHxqo8FeKW+YA7j5U8aNkVC3X
+crlV7VAbfd5k8NDaNe39dM8YNfJre6yBF8Wbvh6HV2a2JgzeQHQPXqLIKC27MCCY
+67P/IHmTis5Yz/tDKwO19N463VrDC6wno6fQdeNe5j3j29/y3YAkJweUtvXCYYJ6
+MOBh5hM+jMJWNSnfERUhjzp+FDoVzZgcxZ8OKbkOr6QZo3WBC7ogoJAVIaNS9Kl+
+RXUhdEd2uoYzwcNEmE9EqRTs8+Yy4VlgPS2iHWy+lboa+1Fi4yAZzeTmAd/BLQNB
+kLUI4OzniBtHn0C4oHz+Lfkm24t5iR5pxIWhNnOOxS0gkObtyWPlcC3LXYZ85ude
+mR8265l5FP9jabzfnCfoZWtdnIBUNcwAcB5oCdChipfJobXrmjyp5W8Sw8enr0BU
+ZJ2MwTGufoeQ3t2IsybY82TuXB6aLegevH3xC4kJV3We83LcUxNhkqmycU935ew0
+cJVQO8C3J5U4Pha8tn1+mKvDaKcv4HmG0YZyN48tdOtR1y4+Xzhq9hSwKl+rzG1Y
+TP0mW1fNfHRDrbykxkIpAXay9kDtfafalMI3ShndZfYiYJBe8IB+m9NML/lEPQyC
+fHH3xPNixHu74a71b6xgMZFhrrXBikfMUB1qroWa+9ocy/5LvdfCRIQN+ti7Tb4F
+FH5qzP/qAfjEdejuIdHHKNs/wkhTixqi8QCkDWEXkDj8AsiVmiBva6luSuQ31OiT
+ERZmRhkZfpkKmo4Jgc12dNsOqXYPF2KJ16bSElfuY5PGYR8JEw9Tz1k1UaMmrOGR
+guU=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs/user@ciena.com.pem b/src/test/setup/radius-config/freeradius/certs/user@ciena.com.pem
new file mode 100644
index 0000000..6dc7d9b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/user@ciena.com.pem
@@ -0,0 +1,60 @@
+Bag Attributes
+    localKeyID: 8F EF 1A 9E BD 4E 8D A1 8E 01 DF AF D0 5A 74 29 F5 76 99 DB 
+subject=/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
+issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
+-----BEGIN CERTIFICATE-----
+MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
+MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
+5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
+tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
+OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
+qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
+2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
+eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
+MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
+VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
+RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
+dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
+T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
+yg==
+-----END CERTIFICATE-----
+Bag Attributes
+    localKeyID: 8F EF 1A 9E BD 4E 8D A1 8E 01 DF AF D0 5A 74 29 F5 76 99 DB 
+Key Attributes: <No Attributes>
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQI6qtqDG5BZo0CAggA
+MBQGCCqGSIb3DQMHBAi1fAR8FIi0cgSCBMjz9+X+jT+0Bop2xt6SN0SHb/zqlDKP
+Ca1mvOp0+hbID9/lZ4uh71QhxwFnEp/Vnoz2Shxavb75SQKNBVWuazsiJogrFfTj
+d/3PLlTpyIheFLpBkh95Gt4103ON4wPDh2g6j8hpEJoBy4qxb4C8FrWJ05LYjONI
+Uz+zlAn6v2pWkep/kLijb9hCyBjlVRECdArwL/Y7KzFwiGGJX+FtDABrZKfHZw7l
+ovgPQE/7bSG8/8clJ+1IM1AWTFqxpvnj66lWh/DMfoAvNKH45lpGvkbNy9UHIJsu
+1fYyeGibGSisGuLCvsojt/Idmz2O/zU1suv8/ZVV3vRWe+FpUUZqC1rVTKA+BVyy
+vWiFi3u0tezWNiey0y7ocFxIv5+PDH87cG54xRfwQIgDB4QAdcaJpmA4pkU7puhS
+gv54wcAxLRhQGy4lVOiqBkIVzux0m12L/Jh6Ctl8EfYMrrdQeBZMAVtC/qLgv2O4
+cJWVzD69lveMNOekLswaOzKVxwpe2jFeWf9TsDpV1+r+sYcT3b9xHmo7gHLH/Vu5
+RcIRD6QFBKyt8lvqNUSysDA8GnRcw/AInrfaLz7Nly768gkSjMLd1EByCraX8GaD
+8RPl8thtjqbfoC/j2+UjYQw79QnxOia2K0Ft3aXh40YTMQqXDZuoPR3ajaS4Fyz5
+nxnctUtk0i85p8Ge4V3pMZCC6EZdTrzgKv8GvFJLzmy7vfTbJBapK8LlwUJMWyyv
+aED55kkv2lhjPT/Qy4iz5j9/Q5DgEENEB42x0SDK8fjTGZ8vrIEp7kzPMYuqpFZ8
+XFiwHB4brrpq7gxyedG7FbzWqO+t3xSvgXRYDj7WKMUPZXw7QGvGNci2U7GiPVM1
+vPCpIllgh2ZJ9p1vvf44o6Aoh2i4Hkl24zRTSV5L4c5yu8Eeckj6lJubTSlrfa2d
+aEIyO6SK3jgXJKXl70Xv4jAe5M5JDkK85k9HAXQryceWi8dpuHfNb9akFX8vMymh
+QCd2QzROyAytHGAPgH/55hHIg93ORkpMA9GzsutH9+u14uziSpbr3B3j8uOgKTnN
+U+rrEKP9Wm23efJ/X5IzEfFPeGqG4dZDOn9US/WkwiTNFmy834T89DRT609ckSVD
+AhAlQVOXLE2gp/0i+b2vdT1Br+PRqQOXh0sv9x6uL56tvuUG3nnOWGZSljZmEm5p
+Qa8JSjY3ZDn6KC1FJyk4DTf7U38zTi2Z/+AL8K680zEJ6Uc9HI3wVdd19Cf84RdQ
+T6fD/LkvBAWTHzVxMrxYVPqE/3Sf1AJX0cmzciZhxrCml4RQZ78i98/yJaxo7ql7
+0Q34+KaAXHP1QghjmXBERBcyHgNeV/jUUXsxZA/MCkn7PvtCuHkNqc2ZlmN0hLwz
+ObHOo826krBRSlpn7P3DreEjSxzCxPpUXAn6f6bXhtVVIiy6ITkWWfVoLD1/crTc
+sRZMi/EKkmgfIzgCYt3JzAdoii3D7ebxcvfVThbDguOE78OhKY3n4wyJ+FpUn56j
+VcX6Ckl2hAE9qwRNOi383+35A0iUb+vF7ky3K/xMONog+dwdTRkhDbz6rTlO72Tj
+B9xcVJAbopB90NNfrG7LRD472O+t+53C27tG1kyIOaCvXPmeFwgTcLLVjDAyFzXb
+uGo=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs/xpextensions b/src/test/setup/radius-config/freeradius/certs/xpextensions
new file mode 100644
index 0000000..8e4a9a2
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs/xpextensions
@@ -0,0 +1,24 @@
+#
+#  File containing the OIDs required for Windows.
+#
+#  http://support.microsoft.com/kb/814394/en-us
+#
+[ xpclient_ext]
+extendedKeyUsage = 1.3.6.1.5.5.7.3.2
+crlDistributionPoints = URI:http://www.example.com/example_ca.crl
+
+[ xpserver_ext]
+extendedKeyUsage = 1.3.6.1.5.5.7.3.1
+crlDistributionPoints = URI:http://www.example.com/example_ca.crl
+
+#
+#  Add this to the PKCS#7 keybag attributes holding the client's private key
+#  for machine authentication.
+#
+#  the presence of this OID tells Windows XP that the cert is intended
+#  for use by the computer itself, and not by an end-user.
+#
+#  The other solution is to use Microsoft's web certificate server
+#  to generate these certs.
+#
+# 1.3.6.1.4.1.311.17.2
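As a hedged check (not part of this patch), the effect of the `xpserver_ext`/`xpclient_ext` sections above can be confirmed on the issued certificates by printing their Extended Key Usage:

```
# sketch: confirm the Windows-required EKU OIDs were added during signing
openssl x509 -noout -text -in server.crt | grep -A1 "Extended Key Usage"
openssl x509 -noout -text -in client.crt | grep -A1 "Extended Key Usage"
```

The server certificate should show "TLS Web Server Authentication" (1.3.6.1.5.5.7.3.1) and the client certificate "TLS Web Client Authentication" (1.3.6.1.5.5.7.3.2), matching the certificate dumps earlier in this patch.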
diff --git a/src/test/setup/radius-config/freeradius/certs_2/01.pem b/src/test/setup/radius-config/freeradius/certs_2/01.pem
new file mode 100644
index 0000000..246df1b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/01.pem
@@ -0,0 +1,80 @@
+Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number: 1 (0x1)
+    Signature Algorithm: sha1WithRSAEncryption
+        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
+        Validity
+            Not Before: Mar 11 18:53:35 2016 GMT
+            Not After : Mar  6 18:53:35 2017 GMT
+        Subject: C=US, ST=CA, O=Ciena Inc., CN=Example Server Certificate/emailAddress=admin@ciena.com
+        Subject Public Key Info:
+            Public Key Algorithm: rsaEncryption
+                Public-Key: (2048 bit)
+                Modulus:
+                    00:a7:9b:3d:b2:8f:6d:dd:55:c7:34:5a:8b:c7:78:
+                    a8:ff:14:fa:21:0e:60:1b:0c:87:36:f1:07:3a:cc:
+                    f1:8a:9d:23:4c:31:8d:81:92:0b:1f:b2:f9:6f:55:
+                    79:c3:fd:18:8f:99:a7:8b:8c:41:18:a6:02:08:cb:
+                    b5:5b:8b:b7:23:a3:6d:20:a9:ec:ee:bf:fa:f1:99:
+                    d7:07:35:a1:3b:e9:5e:b7:84:8a:db:5d:46:15:3e:
+                    1f:92:2d:12:db:4c:c3:aa:13:c7:dd:2d:a0:0a:d2:
+                    3c:59:19:fa:7c:d9:a5:b4:16:bd:82:ba:35:47:c4:
+                    dc:fb:af:61:f1:70:d8:b3:2c:ef:91:20:c5:d5:af:
+                    b7:ac:5d:15:4e:ea:64:ab:0b:b3:ee:25:7e:aa:a8:
+                    a0:a5:36:2e:59:ed:b8:c7:02:4f:ab:9b:e7:50:4c:
+                    30:14:4d:48:1a:a2:88:05:6e:7e:82:ef:f8:c5:70:
+                    b5:d8:3e:ae:f6:e0:2e:68:ba:52:d3:e5:3a:2d:0f:
+                    dd:43:86:39:b5:af:5b:c3:86:7b:98:78:7f:d5:9b:
+                    ee:9f:e4:50:5e:03:9e:29:67:f5:78:35:b1:d3:e2:
+                    66:2d:68:36:c2:30:c9:06:c2:1c:73:9b:c3:09:28:
+                    ba:08:b8:f5:49:e0:5b:d1:43:d9:38:06:47:32:a2:
+                    de:a9
+                Exponent: 65537 (0x10001)
+        X509v3 extensions:
+            X509v3 Extended Key Usage: 
+                TLS Web Server Authentication
+            X509v3 CRL Distribution Points: 
+
+                Full Name:
+                  URI:http://www.example.com/example_ca.crl
+
+    Signature Algorithm: sha1WithRSAEncryption
+         58:cd:50:ef:5f:b1:3e:34:a2:5d:f4:59:f6:11:25:be:de:b2:
+         f8:58:81:2a:89:f0:e7:df:36:88:49:8e:d2:c4:44:22:6c:40:
+         a4:13:6c:8c:15:9d:f2:9e:32:29:ff:31:f0:82:92:3e:93:58:
+         ce:eb:da:9b:19:76:7d:de:54:c5:b1:b2:2e:66:4c:7b:7c:9c:
+         98:12:f9:20:aa:d3:c7:d3:0b:70:5a:c3:24:d7:b9:92:8e:38:
+         fe:54:21:c6:c9:e4:c8:b8:b6:ae:8a:0e:2d:18:95:53:da:b6:
+         9b:94:12:0d:68:e8:ef:0a:78:8b:29:cc:0f:59:a4:d8:dc:6c:
+         34:b2:7a:6f:de:63:1e:e1:03:d2:f3:ca:b6:26:05:f4:22:51:
+         2a:ff:78:d1:07:b3:e7:7e:ab:68:33:2a:0a:d0:cc:be:26:ea:
+         6a:6e:10:d7:2b:7a:7b:cb:e6:0d:50:66:7f:9a:33:31:ad:8d:
+         1b:3b:3f:8e:74:29:3d:07:37:9d:4d:29:ad:b6:cc:84:d8:1c:
+         09:48:61:ce:67:30:ee:74:25:fe:23:5a:8d:00:f6:1a:5d:de:
+         04:70:a7:ea:e9:6f:b0:25:10:f4:3a:70:ab:5a:57:5f:53:12:
+         d8:0e:52:f4:f2:f5:dc:25:71:e5:46:24:09:65:95:22:1b:35:
+         8b:78:9b:6f
+-----BEGIN CERTIFICATE-----
+MIIDyjCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM1WhcN
+MTcwMzA2MTg1MzM1WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
+YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCnmz2yj23dVcc0WovHeKj/FPohDmAbDIc28Qc6
+zPGKnSNMMY2BkgsfsvlvVXnD/RiPmaeLjEEYpgIIy7Vbi7cjo20gqezuv/rxmdcH
+NaE76V63hIrbXUYVPh+SLRLbTMOqE8fdLaAK0jxZGfp82aW0Fr2CujVHxNz7r2Hx
+cNizLO+RIMXVr7esXRVO6mSrC7PuJX6qqKClNi5Z7bjHAk+rm+dQTDAUTUgaoogF
+bn6C7/jFcLXYPq724C5oulLT5TotD91Dhjm1r1vDhnuYeH/Vm+6f5FBeA54pZ/V4
+NbHT4mYtaDbCMMkGwhxzm8MJKLoIuPVJ4FvRQ9k4Bkcyot6pAgMBAAGjTzBNMBMG
+A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
+ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAFjN
+UO9fsT40ol30WfYRJb7esvhYgSqJ8OffNohJjtLERCJsQKQTbIwVnfKeMin/MfCC
+kj6TWM7r2psZdn3eVMWxsi5mTHt8nJgS+SCq08fTC3BawyTXuZKOOP5UIcbJ5Mi4
+tq6KDi0YlVPatpuUEg1o6O8KeIspzA9ZpNjcbDSyem/eYx7hA9LzyrYmBfQiUSr/
+eNEHs+d+q2gzKgrQzL4m6mpuENcrenvL5g1QZn+aMzGtjRs7P450KT0HN51NKa22
+zITYHAlIYc5nMO50Jf4jWo0A9hpd3gRwp+rpb7AlEPQ6cKtaV19TEtgOUvTy9dwl
+ceVGJAlllSIbNYt4m28=
+-----END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/02.pem b/src/test/setup/radius-config/freeradius/certs_2/02.pem
new file mode 100644
index 0000000..1197fec
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/02.pem
@@ -0,0 +1,80 @@
+Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number: 2 (0x2)
+    Signature Algorithm: sha1WithRSAEncryption
+        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
+        Validity
+            Not Before: Mar 11 18:53:36 2016 GMT
+            Not After : Mar  6 18:53:36 2017 GMT
+        Subject: C=US, ST=CA, O=Ciena Inc., CN=user@ciena.com/emailAddress=user@ciena.com
+        Subject Public Key Info:
+            Public Key Algorithm: rsaEncryption
+                Public-Key: (2048 bit)
+                Modulus:
+                    00:ec:5e:99:c0:6c:3e:7f:6d:66:c0:9a:e6:8d:89:
+                    03:ab:10:0c:2e:c0:e8:98:02:35:75:e5:d9:51:73:
+                    28:b3:4d:bc:1f:4f:4b:a0:fb:64:1b:10:e4:09:f4:
+                    cc:40:cc:37:38:b9:d6:ae:e5:9e:b6:20:d5:7c:a2:
+                    13:84:bc:17:33:06:00:5a:fd:e1:19:46:31:02:54:
+                    6d:10:fd:57:fa:2a:b3:33:17:e2:4f:be:88:03:8a:
+                    b4:80:35:82:bd:f1:ed:98:be:d2:d9:23:55:25:73:
+                    5d:39:a0:36:78:42:84:06:a6:74:cb:23:61:41:b9:
+                    f8:26:3d:58:08:9c:5f:2c:be:54:45:77:cd:3a:61:
+                    65:90:d1:3a:37:23:12:9e:26:fd:34:97:54:f6:0f:
+                    81:80:d7:23:8d:18:64:a5:f9:05:db:ea:ca:45:ad:
+                    4f:fb:48:81:96:f8:f1:14:b5:34:fc:8d:fd:79:02:
+                    63:39:77:6e:fd:b5:ab:1e:cc:73:47:dc:11:bb:09:
+                    04:82:11:61:35:24:7f:19:ec:8a:57:27:98:bc:52:
+                    60:ef:a9:f8:36:7d:b8:12:c1:cd:c4:2d:fb:84:5d:
+                    e6:92:d2:7e:2d:b5:58:cd:fd:d9:9d:a8:3a:2d:ef:
+                    b3:f3:98:00:f0:2a:82:68:b6:25:63:af:de:67:8f:
+                    6b:ff
+                Exponent: 65537 (0x10001)
+        X509v3 extensions:
+            X509v3 Extended Key Usage: 
+                TLS Web Client Authentication
+            X509v3 CRL Distribution Points: 
+
+                Full Name:
+                  URI:http://www.example.com/example_ca.crl
+
+    Signature Algorithm: sha1WithRSAEncryption
+         2c:1c:cc:3c:34:c8:07:ab:0b:c8:f9:74:4f:a2:55:32:33:a4:
+         ca:57:40:56:15:d6:89:0b:13:8d:a1:90:18:95:14:7b:57:26:
+         50:9c:99:6d:46:44:73:5d:cc:ca:05:cc:3c:e3:38:c7:bc:fa:
+         56:95:c6:ba:ad:5c:fd:5c:67:57:34:36:7c:d9:db:25:c2:00:
+         1e:2f:fb:1b:c5:b9:fd:24:1f:3d:eb:08:54:3b:07:4c:47:38:
+         66:ca:4f:8b:98:e5:4b:f3:15:5f:71:ce:0b:e0:43:6c:e8:dd:
+         6f:0a:8f:45:7d:09:12:bf:ae:3b:28:63:3b:e9:51:c4:6f:22:
+         94:c4:40:0a:80:54:6f:0d:5e:0e:e5:43:a0:40:60:12:b4:94:
+         0b:8e:29:ab:98:a8:0f:0d:b1:7a:57:3e:63:a8:50:76:6b:58:
+         c1:f6:34:0d:bb:f0:c4:7b:40:e3:de:5f:ac:bc:8f:71:ab:2d:
+         0e:24:ff:ce:b7:bb:34:be:75:33:25:03:3e:38:d8:8e:08:4d:
+         40:4c:2f:bb:ae:88:29:b4:37:4f:5b:49:06:b8:08:ef:f5:88:
+         f9:da:a1:28:11:68:94:a1:8a:4c:35:88:1e:c6:57:42:f6:75:
+         b2:71:ae:fc:54:58:ce:0d:65:f9:1f:e3:4f:c7:11:07:d0:43:
+         c2:15:2d:ca
+-----BEGIN CERTIFICATE-----
+MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
+MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
+5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
+tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
+OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
+qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
+2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
+eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
+MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
+VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
+RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
+dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
+T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
+yg==
+-----END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/07a45775.0 b/src/test/setup/radius-config/freeradius/certs_2/07a45775.0
new file mode 120000
index 0000000..799a1c6
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/07a45775.0
@@ -0,0 +1 @@
+client.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/19a996e2.0 b/src/test/setup/radius-config/freeradius/certs_2/19a996e2.0
new file mode 120000
index 0000000..799a1c6
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/19a996e2.0
@@ -0,0 +1 @@
+client.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/34e00910.0 b/src/test/setup/radius-config/freeradius/certs_2/34e00910.0
new file mode 120000
index 0000000..55f0c91
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/34e00910.0
@@ -0,0 +1 @@
+01.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/865470fd.0 b/src/test/setup/radius-config/freeradius/certs_2/865470fd.0
new file mode 120000
index 0000000..e375f5a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/865470fd.0
@@ -0,0 +1 @@
+ca.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/8fe581ba.0 b/src/test/setup/radius-config/freeradius/certs_2/8fe581ba.0
new file mode 120000
index 0000000..55f0c91
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/8fe581ba.0
@@ -0,0 +1 @@
+01.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/Makefile b/src/test/setup/radius-config/freeradius/certs_2/Makefile
new file mode 100644
index 0000000..c8f0892
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/Makefile
@@ -0,0 +1,140 @@
+######################################################################
+#
+#	Make file to be installed in /etc/raddb/certs to enable
+#	the easy creation of certificates.
+#
+#	See the README file in this directory for more information.
+#
+#	$Id: 0613df99502989a6d5751eb8b2088000c58cae98 $
+#
+######################################################################
+
+DH_KEY_SIZE	= 1024
+
+#
+#  Set the passwords
+#
+PASSWORD_SERVER	= `grep output_password server.cnf | sed 's/.*=//;s/^ *//'`
+PASSWORD_CA	= `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'`
+PASSWORD_CLIENT	= `grep output_password client.cnf | sed 's/.*=//;s/^ *//'`
+
+USER_NAME	= `grep emailAddress client.cnf | grep '@' | sed 's/.*=//;s/^ *//'`
+CA_DEFAULT_DAYS = `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'`
+
+######################################################################
+#
+#  Make the necessary files, but not client certificates.
+#
+######################################################################
+.PHONY: all
+all: index.txt serial dh random server ca client
+
+.PHONY: client
+client: client.pem
+
+.PHONY: ca
+ca: ca.der
+
+.PHONY: server
+server: server.pem server.vrfy
+
+######################################################################
+#
+#  Diffie-Hellman parameters
+#
+######################################################################
+dh:
+	openssl dhparam -out dh $(DH_KEY_SIZE)
+
+######################################################################
+#
+#  Create a new self-signed CA certificate
+#
+######################################################################
+ca.key ca.pem: ca.cnf
+	@[ -f index.txt ] || $(MAKE) index.txt
+	@[ -f serial ] || $(MAKE) serial
+	openssl req -new -x509 -keyout ca.key -out ca.pem \
+		-days $(CA_DEFAULT_DAYS) -config ./ca.cnf
+
+ca.der: ca.pem
+	openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der
+
+######################################################################
+#
+#  Create a new server certificate, signed by the above CA.
+#
+######################################################################
+server.csr server.key: server.cnf
+	openssl req -new  -out server.csr -keyout server.key -config ./server.cnf
+
+server.crt: server.csr ca.key ca.pem
+	openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key $(PASSWORD_CA) -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf
+
+server.p12: server.crt
+	openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
+
+server.pem: server.p12
+	openssl pkcs12 -in server.p12 -out server.pem -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
+
+.PHONY: server.vrfy
+server.vrfy: ca.pem
+	@openssl verify -CAfile ca.pem server.pem
+
+######################################################################
+#
+#  Create a new client certificate, signed by the above server
+#  certificate.
+#
+######################################################################
+client.csr client.key: client.cnf
+	openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
+
+client.crt: client.csr ca.pem ca.key
+	openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key $(PASSWORD_CA) -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
+
+client.p12: client.crt
+	openssl pkcs12 -export -in client.crt -inkey client.key -out client.p12  -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
+
+client.pem: client.p12
+	openssl pkcs12 -in client.p12 -out client.pem -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
+	cp client.pem $(USER_NAME).pem
+
+.PHONY: client.vrfy
+client.vrfy: ca.pem client.pem
+	c_rehash .
+	openssl verify -CApath . client.pem
+
+######################################################################
+#
+#  Miscellaneous rules.
+#
+######################################################################
+index.txt:
+	@touch index.txt
+
+serial:
+	@echo '01' > serial
+
+random:
+	@if [ -c /dev/urandom ] ; then \
+		ln -sf /dev/urandom random; \
+	else \
+		date > ./random; \
+	fi
+
+print:
+	openssl x509 -text -in server.crt
+
+printca:
+	openssl x509 -text -in ca.pem
+
+clean:
+	@rm -f *~ *old client.csr client.key client.crt client.p12 client.pem
+
+#
+#	Make a target that people won't run too often.
+#
+destroycerts:
+	rm -f *~ dh *.csr *.crt *.p12 *.der *.pem *.key index.txt* \
+			serial* random *\.0 *\.1
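A hedged usage sketch for the Makefile above, assuming `ca.cnf`, `client.cnf`, `server.cnf`, and `xpextensions` sit alongside it:

```
# sketch: build the CA, server, and client material, then verify the chain
make all
make server.vrfy
make client.vrfy
# remove the generated client material only (the clean target leaves the CA alone)
make clean
```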
diff --git a/src/test/setup/radius-config/freeradius/certs_2/Makefile.orig b/src/test/setup/radius-config/freeradius/certs_2/Makefile.orig
new file mode 100644
index 0000000..c8f0892
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/Makefile.orig
@@ -0,0 +1,140 @@
+######################################################################
+#
+#	Make file to be installed in /etc/raddb/certs to enable
+#	the easy creation of certificates.
+#
+#	See the README file in this directory for more information.
+#
+#	$Id: 0613df99502989a6d5751eb8b2088000c58cae98 $
+#
+######################################################################
+
+DH_KEY_SIZE	= 1024
+
+#
+#  Set the passwords
+#
+PASSWORD_SERVER	= `grep output_password server.cnf | sed 's/.*=//;s/^ *//'`
+PASSWORD_CA	= `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'`
+PASSWORD_CLIENT	= `grep output_password client.cnf | sed 's/.*=//;s/^ *//'`
+
+USER_NAME	= `grep emailAddress client.cnf | grep '@' | sed 's/.*=//;s/^ *//'`
+CA_DEFAULT_DAYS = `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'`
+
+######################################################################
+#
+#  Make the necessary files, but not client certificates.
+#
+######################################################################
+.PHONY: all
+all: index.txt serial dh random server ca client
+
+.PHONY: client
+client: client.pem
+
+.PHONY: ca
+ca: ca.der
+
+.PHONY: server
+server: server.pem server.vrfy
+
+######################################################################
+#
+#  Diffie-Hellman parameters
+#
+######################################################################
+dh:
+	openssl dhparam -out dh $(DH_KEY_SIZE)
+
+######################################################################
+#
+#  Create a new self-signed CA certificate
+#
+######################################################################
+ca.key ca.pem: ca.cnf
+	@[ -f index.txt ] || $(MAKE) index.txt
+	@[ -f serial ] || $(MAKE) serial
+	openssl req -new -x509 -keyout ca.key -out ca.pem \
+		-days $(CA_DEFAULT_DAYS) -config ./ca.cnf
+
+ca.der: ca.pem
+	openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der
+
+######################################################################
+#
+#  Create a new server certificate, signed by the above CA.
+#
+######################################################################
+server.csr server.key: server.cnf
+	openssl req -new  -out server.csr -keyout server.key -config ./server.cnf
+
+server.crt: server.csr ca.key ca.pem
+	openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key $(PASSWORD_CA) -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf
+
+server.p12: server.crt
+	openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
+
+server.pem: server.p12
+	openssl pkcs12 -in server.p12 -out server.pem -passin pass:$(PASSWORD_SERVER) -passout pass:$(PASSWORD_SERVER)
+
+.PHONY: server.vrfy
+server.vrfy: ca.pem
+	@openssl verify -CAfile ca.pem server.pem
+
+######################################################################
+#
+#  Create a new client certificate, signed by the above server
+#  certificate.
+#
+######################################################################
+client.csr client.key: client.cnf
+	openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
+
+client.crt: client.csr ca.pem ca.key
+	openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key $(PASSWORD_CA) -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
+
+client.p12: client.crt
+	openssl pkcs12 -export -in client.crt -inkey client.key -out client.p12  -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
+
+client.pem: client.p12
+	openssl pkcs12 -in client.p12 -out client.pem -passin pass:$(PASSWORD_CLIENT) -passout pass:$(PASSWORD_CLIENT)
+	cp client.pem $(USER_NAME).pem
+
+.PHONY: client.vrfy
+client.vrfy: ca.pem client.pem
+	c_rehash .
+	openssl verify -CApath . client.pem
+
+######################################################################
+#
+#  Miscellaneous rules.
+#
+######################################################################
+index.txt:
+	@touch index.txt
+
+serial:
+	@echo '01' > serial
+
+random:
+	@if [ -c /dev/urandom ] ; then \
+		ln -sf /dev/urandom random; \
+	else \
+		date > ./random; \
+	fi
+
+print:
+	openssl x509 -text -in server.crt
+
+printca:
+	openssl x509 -text -in ca.pem
+
+clean:
+	@rm -f *~ *old client.csr client.key client.crt client.p12 client.pem
+
+#
+#	Make a target that people won't run too often.
+#
+destroycerts:
+	rm -f *~ dh *.csr *.crt *.p12 *.der *.pem *.key index.txt* \
+			serial* random *\.0 *\.1
diff --git a/src/test/setup/radius-config/freeradius/certs_2/README b/src/test/setup/radius-config/freeradius/certs_2/README
new file mode 100644
index 0000000..f7e0591
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/README
@@ -0,0 +1,226 @@
+  This directory contains scripts to create the server certificates.
+To make a set of default (i.e. test) certificates, simply type:
+
+$ ./bootstrap
+
+  The "openssl" command will be run against the sample configuration
+files included here, and will make a self-signed certificate authority
+(i.e. root CA), and a server certificate.  This "root CA" should be
+installed on any client machine needing to do EAP-TLS, PEAP, or
+EAP-TTLS.
+
+  The Microsoft "XP Extensions" will be automatically included in the
+server certificate.  Without those extensions Windows clients will
+refuse to authenticate to FreeRADIUS.
+
+  The root CA and the "XP Extensions" file also contain a crlDistributionPoints
+attribute. The latest release of Windows Phone needs this to be present
+for the handset to validate the RADIUS server certificate. The RADIUS
+server must have the URI defined, but the CA need not; it is still best
+practice for a CA to have a revocation URI. Note that whilst the
+Windows Mobile client cannot actually use the CRL when doing 802.1X,
+it is recommended that the URI be a working URL that serves a
+revocation file, as there may be other OS behaviour at play, and
+future OSes may do something with that URI.
+
+  In general, you should use self-signed certificates for 802.1x (EAP)
+authentication.  When you list root CAs from other organisations in
+the "ca_file", you permit them to masquerade as you, to authenticate
+your users, and to issue client certificates for EAP-TLS.
+
+  If FreeRADIUS was configured to use OpenSSL, then simply starting
+the server as root in debugging mode should also create test
+certificates, i.e.:
+
+$ radiusd -X
+
+  That will cause the EAP-TLS module to run the "bootstrap" script in
+this directory.  The script will be executed only once, the first time
+the server has been installed on a particular machine.  This bootstrap
+script SHOULD be run on installation of any pre-built binary package
+for your OS.  In any case, the script will ensure that it is not run
+twice, and that it does not over-write any existing certificates.
+
+  If you already have CA and server certificates, rename (or delete)
+this directory, and create a new "certs" directory containing your
+certificates.  Note that the "make install" command will NOT
+over-write your existing "raddb/certs" directory, which means that the
+"bootstrap" command will not be run.
+
+
+		NEW INSTALLATIONS OF FREERADIUS
+
+
+  We suggest that new installations use the test certificates for
+initial tests, and then create real certificates to use for normal
+user authentication.  See the instructions below for how to create the
+various certificates.  The old test certificates can be deleted by
+running the following command:
+
+$ rm -f *.pem *.der *.csr *.crt *.key *.p12 serial* index.txt*
+
+  Then, follow the instructions below for creating real certificates.
+
+  Once the final certificates have been created, you can delete the
+"bootstrap" command from this directory, and delete the
+"make_cert_command" configuration from the "tls" sub-section of
+eap.conf.
+
+  If you do not want to enable EAP-TLS, PEAP, or EAP-TTLS, then delete
+the relevant sub-sections from the "eap.conf" file.
+
+
+		MAKING A ROOT CERTIFICATE
+
+
+$ vi ca.cnf
+
+  Edit the "input_password" and "output_password" fields to be the
+  password for the CA certificate.
+
+  Edit the [certificate_authority] section to have the correct values
+  for your country, state, etc.
+
+$ make ca.pem
+
+  This step creates the CA certificate.
+
+$ make ca.der
+
+  This step creates the DER format of the self-signed certificate,
+  which can be imported into Windows.
+
+
+		MAKING A SERVER CERTIFICATE
+
+
+$ vi server.cnf
+
+  Edit the "input_password" and "output_password" fields to be the
+  password for the server certificate.
+
+  Edit the [server] section to have the correct values for your
+  country, state, etc.  Be sure that the commonName field here is
+  different from the commonName for the CA certificate.
+
+$ make server.pem
+
+  This step creates the server certificate.
+
+  If you have an existing certificate authority, and wish to create a
+  certificate signing request for the server certificate, edit
+  server.cnf as above, and type the following command.
+
+$ make server.csr
+
+  You will have to ensure that the certificate contains the XP
+  extensions needed by Microsoft clients.
+
+
+		MAKING A CLIENT CERTIFICATE
+
+
+  Client certificates are used by EAP-TLS, and optionally by EAP-TTLS
+and PEAP.  The following steps outline how to create a client
+certificate that is signed by the server certificate created above.
+You will have to have the password for the server certificate in the
+"input_password" and "output_password" fields of the server.cnf file.
+
+
+$ vi client.cnf
+
+  Edit the "input_password" and "output_password" fields to be the
+  password for the client certificate.  You will have to give these
+  passwords to the end user who will be using the certificates.
+
+  Edit the [client] section to have the correct values for your
+  country, state, etc.  Be sure that the commonName field here is
+  the User-Name that will be used for logins!
+
+$ make client.pem
+
+  The user's certificate will be in "emailAddress.pem",
+  i.e. "user@example.com.pem".
+
+  To create another client certificate, just repeat the steps for
+  making a client certificate, being sure to enter a different login
+  name for "commonName", and a different password.
+
+
+		PERFORMANCE
+
+
+  EAP performance for EAP-TLS, TTLS, and PEAP is dominated by SSL
+  calculations.  For comparison, a normal system can handle PAP
+  authentication at a rate of 10k packets/s.  However, SSL involves
+  RSA calculations, which are very expensive.  To benchmark your system,
+  do:
+
+$ openssl speed rsa
+
+  or
+
+$ openssl speed rsa2048
+
+  to test 2048 bit keys.
+
+  A 1GHz system will likely do 30 calculations/s.  A 2GHz system may
+  do 50 calculations/s, or more.  That number is also the number of
+  authentications/s that can be done for EAP-TLS (or TTLS, or PEAP).
+
+
+		COMPATIBILITY
+
+The certificates created using this method are known to be compatible
+with ALL operating systems.  Some common issues are:
+
+  - Windows requires certain OIDs in the certificates.  If it doesn't
+    see them, it will stop doing EAP.  The most visible effect is
+    that the client starts EAP, gets a few Access-Challenge packets,
+    and then a little while later re-starts EAP.  If this happens, see
+    the FAQ, and the comments in raddb/eap.conf for how to fix it.
+
+  - Windows requires the root certificates to be on the client PC.
+    If it doesn't have them, you will see the same issue as above.
+
+  - Windows XP post-SP2 has a bug in its handling of certificate
+    chains: if the server certificate is an intermediate one, and not
+    a root one, then authentication will silently fail, as above.
+
+  - Some versions of Windows CE cannot handle 4K RSA certificates.
+    They will (again) silently fail, as above.
+
+  - In none of these cases will Windows give the end user any
+    reasonable error message describing what went wrong.  This leads
+    people to blame the RADIUS server.  That blame is misplaced.
+
+  - Certificate chains of more than 64K bytes are known not to work.
+    This is partly a limitation in FreeRADIUS, but most clients cannot
+    handle 64K certificate chains either.  Most Access Points will shut
+    down the EAP session after about 50 round trips, while 64K
+    certificate chains will take about 60 round trips.  So don't use
+    large certificate chains.  They will only work after everyone
+    upgrades everything in the network.
+
+  - All other operating systems are known to work with EAP and
+    FreeRADIUS.  This includes Linux, *BSD, Mac OS X, Solaris,
+    Symbian, along with all known embedded systems, phones, WiFi
+    devices, etc.
+
+  - Someone needs to ask Microsoft to please stop making life hard for
+    their customers.
+
+
+		SECURITY CONSIDERATIONS
+
+The default certificate configuration files use MD5 for message
+digests, to maintain compatibility with network equipment that
+supports only this algorithm.
+
+MD5 has known weaknesses and is discouraged in favour of SHA1 (see
+http://www.kb.cert.org/vuls/id/836068 for details). If your network
+equipment supports the SHA1 signature algorithm, we recommend that you
+change the "ca.cnf", "server.cnf", and "client.cnf" files to specify
+the use of SHA1 for the certificates. To do this, change the
+'default_md' entry in those files from 'md5' to 'sha1'.
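+
+In the .cnf files shipped in this directory the entry is already
+"sha1"; if yours still reads "md5", one way to change all three files
+at once (a sketch, assuming GNU sed's in-place editing) is:
+
+$ sed -i '/^default_md/s/md5/sha1/' ca.cnf server.cnf client.cnf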
diff --git a/src/test/setup/radius-config/freeradius/certs_2/bootstrap b/src/test/setup/radius-config/freeradius/certs_2/bootstrap
new file mode 100755
index 0000000..82f93ec
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/bootstrap
@@ -0,0 +1,82 @@
+#!/bin/sh
+#
+#  This is a wrapper script to create default certificates when the
+#  server first starts in debugging mode.  Once the certificates have been
+#  created, this file should be deleted.
+#
+#  Ideally, this program should be run as part of the installation of any
+#  binary package.  The installation should also ensure that the permissions
+#  and owners are correct for the files generated by this script.
+#
+#  $Id: c9d939beac8d5bdc21ea1ff9233442f9ab933297 $
+#
+umask 027
+cd `dirname $0`
+
+make -h > /dev/null 2>&1
+
+#
+#  If we have a working "make", then use it.  Otherwise, run the commands
+#  manually.
+#
+if [ "$?" = "0" ]; then
+  make all
+  exit $?
+fi
+
+#
+#  The following commands were created by running "make -n", and edited
+#  to remove the trailing backslash, and to add "exit 1" after the commands.
+#
+#  Don't edit the following text.  Instead, edit the Makefile, and
+#  re-generate these commands.
+#
+if [ ! -f dh ]; then
+  openssl dhparam -out dh 1024 || exit 1
+  if [ -e /dev/urandom ] ; then
+	ln -sf /dev/urandom random
+  else
+	date > ./random;
+  fi
+fi
+
+if [ ! -f server.key ]; then
+  openssl req -new  -out server.csr -keyout server.key -config ./server.cnf || exit 1
+fi
+
+if [ ! -f ca.key ]; then
+  openssl req -new -x509 -keyout ca.key -out ca.pem -days `grep default_days ca.cnf | sed 's/.*=//;s/^ *//'` -config ./ca.cnf || exit 1
+fi
+
+if [ ! -f index.txt ]; then
+  touch index.txt
+fi
+
+if [ ! -f serial ]; then
+  echo '01' > serial
+fi
+
+if [ ! -f server.crt ]; then
+  openssl ca -batch -keyfile ca.key -cert ca.pem -in server.csr  -key `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'` -out server.crt -extensions xpserver_ext -extfile xpextensions -config ./server.cnf || exit 1
+fi
+
+if [ ! -f server.p12 ]; then
+  openssl pkcs12 -export -in server.crt -inkey server.key -out server.p12  -passin pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` -passout pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` || exit 1
+fi
+
+if [ ! -f server.pem ]; then
+  openssl pkcs12 -in server.p12 -out server.pem -passin pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` -passout pass:`grep output_password server.cnf | sed 's/.*=//;s/^ *//'` || exit 1
+  openssl verify -CAfile ca.pem server.pem || exit 1
+fi
+
+if [ ! -f ca.der ]; then
+  openssl x509 -inform PEM -outform DER -in ca.pem -out ca.der || exit 1
+fi
+
+if [ ! -f client.key ]; then
+  openssl req -new  -out client.csr -keyout client.key -config ./client.cnf
+fi
+
+if [ ! -f client.crt ]; then
+  openssl ca -batch -keyfile ca.key -cert ca.pem -in client.csr  -key `grep output_password ca.cnf | sed 's/.*=//;s/^ *//'` -out client.crt -extensions xpclient_ext -extfile xpextensions -config ./client.cnf
+fi
diff --git a/src/test/setup/radius-config/freeradius/certs_2/ca.cnf b/src/test/setup/radius-config/freeradius/certs_2/ca.cnf
new file mode 100644
index 0000000..37207e8
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/ca.cnf
@@ -0,0 +1,62 @@
+[ ca ]
+default_ca		= CA_default
+
+[ CA_default ]
+dir			= ./
+certs			= $dir
+crl_dir			= $dir/crl
+database		= $dir/index.txt
+new_certs_dir		= $dir
+certificate		= $dir/ca.pem
+serial			= $dir/serial
+crl			= $dir/crl.pem
+private_key		= $dir/ca.key
+RANDFILE		= $dir/.rand
+name_opt		= ca_default
+cert_opt		= ca_default
+default_days		= 360
+default_crl_days	= 300
+default_md		= sha1
+preserve		= no
+policy			= policy_match
+crlDistributionPoints	= URI:http://www.example.com/example_ca.crl
+
+[ policy_match ]
+countryName		= match
+stateOrProvinceName	= match
+organizationName	= match
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+[ policy_anything ]
+countryName		= optional
+stateOrProvinceName	= optional
+localityName		= optional
+organizationName	= optional
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+[ req ]
+prompt			= no
+distinguished_name	= certificate_authority
+default_bits		= 2048
+input_password		= whatever
+output_password		= whatever
+x509_extensions		= v3_ca
+
+[certificate_authority]
+countryName		= US
+stateOrProvinceName	= CA
+localityName		= Somewhere
+organizationName	= Ciena Inc.
+emailAddress		= admin@ciena.com
+commonName		= "Example Certificate Authority"
+
+[v3_ca]
+subjectKeyIdentifier	= hash
+authorityKeyIdentifier	= keyid:always,issuer:always
+basicConstraints	= CA:true
+crlDistributionPoints	= URI:http://www.example.com/example_ca.crl
+
diff --git a/src/test/setup/radius-config/freeradius/certs_2/ca.der b/src/test/setup/radius-config/freeradius/certs_2/ca.der
new file mode 100644
index 0000000..a505cfc
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/ca.der
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs_2/ca.key b/src/test/setup/radius-config/freeradius/certs_2/ca.key
new file mode 100644
index 0000000..f6ce685
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/ca.key
@@ -0,0 +1,30 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQInjXhKnIFIgUCAggA
+MBQGCCqGSIb3DQMHBAh+B3FNG/y9LQSCBMhDZu1kDr0UGMnD8mpB7H319e8GLakT
+8jzPI+bxethA4ZthxY3x5Jxcvyhzy92pR7lCG2Sr8HOVhPpxmp3W5HhbrDhdOj+Q
+qy4Y00G2JCos2wVKTecAOgE5a3i2hDVJDsGxqfwqdohBUwhxVwGDxyzelClN3tNW
+xlj6YuPdUly5tmS1Jo0vtw94QtKk1N0JlNxkTz98vfvgxD4vHqMZugfV2EX2P985
+taRb2bX8VE5rh7CkNhYmYxyv5LACM+7IyM4yvUBfqJG0JPg4UKDVya1dm2mHb7+I
+6C7UcOM7phMZeHNT1gixzSl1UFEtBJaPgmxKIIyBUls7bgKOtNf+BNo+OTWfrnH+
+nvMAoEEPr0GT/fD1tpiR4JaRYXMUHrUt8kGw3Kayr2q9HYJuDeb1uwxK/ml+7aGN
+km9IEV/8Cc0/7TGSQR8jqS+evEy9Sv7tiB2rGnwB6hsbvT+l3jQdv/cX00vMPhRA
+g2KBqifiXRnZnYWlY1KAhZZm0BqJuohYko+xJ2yL2I5q8H7UooN+ND8nsaROOELq
+4FdwJd3MaHEgEnyPL5zFrCcv/R/v1GZNJGt0GT2marwPjLTkGcvDZvbX44a/InyV
+IlYZA/JNmWP6NlABhZIf/mzxnOWPjBwqq2y8Wg2PXpYrumj1nIUoO5B96YYo6lvS
+wlAjIYAxLy8IsUMURDfpvm2CCIW0aGTwO8YSyWnxLBa27MG8fWubjJafm+K4SOf3
+uLiBWlbrkCG7jvNL07/mnkjlP4n05Olb5nrpeTKOz3nklbQmsBhC5/OHZj7ZlUul
+gAR6/U3B0sefMsdqxuls3w8qfgrV1oQGATxvWgYs5zFa/bXBSN1L+brc2q+8ZtgR
+GkFIwnXPWiKB7GIlamER24a1nctR4vL+sYmpmlav+OS6n/jItTCYed+dQ5inC3hX
+4rdGiAjylaTDkW7k4dtIXGUJNGZbIxrpAqNYOVYrCyAEj+HdpNuTUUO2vohq+EM1
+og7SeLhsVg1bG3lYRaqZaXjsof2NAruFJ8aH93DcwoClxFjNJxOd9YAXIA83Uvz8
+D2Bu1/Z41Grq8O7YEnrYbxJP77G9PAgCLt2Uc16O91Lpg1gZ3gESX2BmuR38wbyv
+t5MoC1/oSBV+643yq2ldQRYOMSKl/CLoApywcatdHCIiDC3AEIklueG5jA9Diutl
+ZfK8XSpBEYPQm+eHLdfUOTTnF3SoNPDGbm102nKyvgmGpReFgREYyZSwvg/1YuL/
+m8S+lR+gmP3i9Y4/0UcccI24tO5s0FI4od/4BZ4NW9JsYKxCTj/WJCH4bpmjtmwK
+WI1XSxso1ueVQ7qJBVJyEsMa480nJ5GMKoEfzhqzGzvT1awcz5y/Q/4vIjGZVmR2
+GekRkn9uadPQnIsYGX99A5gPAXP+oCJ9MqLXZPdWLXdm0OybAkD++ryKfi3DNYq2
+TO4hcHLi7lEaIgDcOt+RWTkF0y6yZ3vnY6llvQTRF7fe+6R4YJg0On69+Lt6BoZw
+hmgaaR8YJl++eFWzCJjdJJrCPIiQginbGbpks2Zrz5hGGcQhNwomRX5DFVouePK5
+qhd54Myo2di+Fu0Ls86+nFwnIs9s1+c/2rDWzV1aRfEjnv3OUSLi1saoXjiunMBq
+/L4=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/ca.pem b/src/test/setup/radius-config/freeradius/certs_2/ca.pem
new file mode 100644
index 0000000..916cdf9
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/ca.pem
@@ -0,0 +1,28 @@
+-----BEGIN CERTIFICATE-----
+MIIEyTCCA7GgAwIBAgIJAM6l2jUG56pLMA0GCSqGSIb3DQEBCwUAMIGLMQswCQYD
+VQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVNvbWV3aGVyZTETMBEGA1UE
+ChMKQ2llbmEgSW5jLjEeMBwGCSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYw
+JAYDVQQDEx1FeGFtcGxlIENlcnRpZmljYXRlIEF1dGhvcml0eTAeFw0xNjAzMTEx
+ODUzMzVaFw0xNzAzMDYxODUzMzVaMIGLMQswCQYDVQQGEwJVUzELMAkGA1UECBMC
+Q0ExEjAQBgNVBAcTCVNvbWV3aGVyZTETMBEGA1UEChMKQ2llbmEgSW5jLjEeMBwG
+CSqGSIb3DQEJARYPYWRtaW5AY2llbmEuY29tMSYwJAYDVQQDEx1FeGFtcGxlIENl
+cnRpZmljYXRlIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL9Jv54TkqycL3U2Fdd/y5NXdnPVXwAVV3m6I3eIffVCv8eS+mwlbl9dnbjo
+qqlGEgA3sEg5HtnKoW81l3PSyV/YaqzUzbcpDlgWlbNkFQ3nVxh61gSU34Fc4h/W
+plSvCkwGSbV5udLtEe6S9IflP2Fu/eXa9vmUtoPqDk66p9U/nWVf2H1GJy7XanWg
+wke+HpQvbzoSfPJS0e5Rm9KErrzaIkJpqt7soW+OjVJitUax7h45RYY1HHHlbMQ0
+ndWW8UDsCxFQO6d7nsijCzY69Y8HarH4mbVtqhg3KJevxD9UMRy6gdtPMDZLah1c
+LHRu14ucOK4aF8oICOgtcD06auUCAwEAAaOCASwwggEoMB0GA1UdDgQWBBQwEs0m
+c8HARTVp21wtiwgav5biqjCBwAYDVR0jBIG4MIG1gBQwEs0mc8HARTVp21wtiwga
+v5biqqGBkaSBjjCBizELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQH
+EwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5hIEluYy4xHjAcBgkqhkiG9w0BCQEW
+D2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMdRXhhbXBsZSBDZXJ0aWZpY2F0ZSBB
+dXRob3JpdHmCCQDOpdo1BueqSzAMBgNVHRMEBTADAQH/MDYGA1UdHwQvMC0wK6Ap
+oCeGJWh0dHA6Ly93d3cuZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZI
+hvcNAQELBQADggEBAK+fyAFO8CbH35P5mOX+5wf7+AeC+5pwaFcoCV0zlfwniANp
+jISgcIX9rcetLxeYRAO5com3+qLdd9dGVNL0kwufH4QhlSPErG7OLHHAs4JWVhUo
+bH3lK9lgFVlnCDBtQhslzqScR64SCicWcQEjv3ZMZsJwYLvl8unSaKz4+LVPeJ2L
+opCpmZw/V/S2NhBbe3QjTiRPmDev2gbaO4GCfi/6sCDU7UO3o8KryrkeeMIiFIej
+gfwn9fovmpeqCEyupy2JNNUTJibEuFknwx7JAX+htPL27nEgwV1FYtwI3qLiZqkM
+729wo9cFSslJNZBu+GsBP5LszQSuvNTDWytV+qY=
+-----END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/client.cnf b/src/test/setup/radius-config/freeradius/certs_2/client.cnf
new file mode 100644
index 0000000..994d3ab
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/client.cnf
@@ -0,0 +1,53 @@
+[ ca ]
+default_ca		= CA_default
+
+[ CA_default ]
+dir			= ./
+certs			= $dir
+crl_dir			= $dir/crl
+database		= $dir/index.txt
+new_certs_dir		= $dir
+certificate		= $dir/ca.pem
+serial			= $dir/serial
+crl			= $dir/crl.pem
+private_key		= $dir/ca.key
+RANDFILE		= $dir/.rand
+name_opt		= ca_default
+cert_opt		= ca_default
+default_days		= 360
+default_crl_days	= 300
+default_md		= sha1
+preserve		= no
+policy			= policy_match
+
+[ policy_match ]
+countryName		= match
+stateOrProvinceName	= match
+organizationName	= match
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+[ policy_anything ]
+countryName		= optional
+stateOrProvinceName	= optional
+localityName		= optional
+organizationName	= optional
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+[ req ]
+prompt			= no
+distinguished_name	= client
+default_bits		= 2048
+input_password		= whatever
+output_password		= whatever
+
+[client]
+countryName		= US
+stateOrProvinceName	= CA
+localityName		= Somewhere
+organizationName	= Ciena Inc.
+emailAddress		= user@ciena.com
+commonName		= user@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs_2/client.crt b/src/test/setup/radius-config/freeradius/certs_2/client.crt
new file mode 100644
index 0000000..1197fec
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/client.crt
@@ -0,0 +1,80 @@
+Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number: 2 (0x2)
+    Signature Algorithm: sha1WithRSAEncryption
+        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
+        Validity
+            Not Before: Mar 11 18:53:36 2016 GMT
+            Not After : Mar  6 18:53:36 2017 GMT
+        Subject: C=US, ST=CA, O=Ciena Inc., CN=user@ciena.com/emailAddress=user@ciena.com
+        Subject Public Key Info:
+            Public Key Algorithm: rsaEncryption
+                Public-Key: (2048 bit)
+                Modulus:
+                    00:ec:5e:99:c0:6c:3e:7f:6d:66:c0:9a:e6:8d:89:
+                    03:ab:10:0c:2e:c0:e8:98:02:35:75:e5:d9:51:73:
+                    28:b3:4d:bc:1f:4f:4b:a0:fb:64:1b:10:e4:09:f4:
+                    cc:40:cc:37:38:b9:d6:ae:e5:9e:b6:20:d5:7c:a2:
+                    13:84:bc:17:33:06:00:5a:fd:e1:19:46:31:02:54:
+                    6d:10:fd:57:fa:2a:b3:33:17:e2:4f:be:88:03:8a:
+                    b4:80:35:82:bd:f1:ed:98:be:d2:d9:23:55:25:73:
+                    5d:39:a0:36:78:42:84:06:a6:74:cb:23:61:41:b9:
+                    f8:26:3d:58:08:9c:5f:2c:be:54:45:77:cd:3a:61:
+                    65:90:d1:3a:37:23:12:9e:26:fd:34:97:54:f6:0f:
+                    81:80:d7:23:8d:18:64:a5:f9:05:db:ea:ca:45:ad:
+                    4f:fb:48:81:96:f8:f1:14:b5:34:fc:8d:fd:79:02:
+                    63:39:77:6e:fd:b5:ab:1e:cc:73:47:dc:11:bb:09:
+                    04:82:11:61:35:24:7f:19:ec:8a:57:27:98:bc:52:
+                    60:ef:a9:f8:36:7d:b8:12:c1:cd:c4:2d:fb:84:5d:
+                    e6:92:d2:7e:2d:b5:58:cd:fd:d9:9d:a8:3a:2d:ef:
+                    b3:f3:98:00:f0:2a:82:68:b6:25:63:af:de:67:8f:
+                    6b:ff
+                Exponent: 65537 (0x10001)
+        X509v3 extensions:
+            X509v3 Extended Key Usage: 
+                TLS Web Client Authentication
+            X509v3 CRL Distribution Points: 
+
+                Full Name:
+                  URI:http://www.example.com/example_ca.crl
+
+    Signature Algorithm: sha1WithRSAEncryption
+         2c:1c:cc:3c:34:c8:07:ab:0b:c8:f9:74:4f:a2:55:32:33:a4:
+         ca:57:40:56:15:d6:89:0b:13:8d:a1:90:18:95:14:7b:57:26:
+         50:9c:99:6d:46:44:73:5d:cc:ca:05:cc:3c:e3:38:c7:bc:fa:
+         56:95:c6:ba:ad:5c:fd:5c:67:57:34:36:7c:d9:db:25:c2:00:
+         1e:2f:fb:1b:c5:b9:fd:24:1f:3d:eb:08:54:3b:07:4c:47:38:
+         66:ca:4f:8b:98:e5:4b:f3:15:5f:71:ce:0b:e0:43:6c:e8:dd:
+         6f:0a:8f:45:7d:09:12:bf:ae:3b:28:63:3b:e9:51:c4:6f:22:
+         94:c4:40:0a:80:54:6f:0d:5e:0e:e5:43:a0:40:60:12:b4:94:
+         0b:8e:29:ab:98:a8:0f:0d:b1:7a:57:3e:63:a8:50:76:6b:58:
+         c1:f6:34:0d:bb:f0:c4:7b:40:e3:de:5f:ac:bc:8f:71:ab:2d:
+         0e:24:ff:ce:b7:bb:34:be:75:33:25:03:3e:38:d8:8e:08:4d:
+         40:4c:2f:bb:ae:88:29:b4:37:4f:5b:49:06:b8:08:ef:f5:88:
+         f9:da:a1:28:11:68:94:a1:8a:4c:35:88:1e:c6:57:42:f6:75:
+         b2:71:ae:fc:54:58:ce:0d:65:f9:1f:e3:4f:c7:11:07:d0:43:
+         c2:15:2d:ca
+-----BEGIN CERTIFICATE-----
+MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
+MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
+5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
+tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
+OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
+qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
+2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
+eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
+MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
+VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
+RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
+dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
+T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
+yg==
+-----END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/client.csr b/src/test/setup/radius-config/freeradius/certs_2/client.csr
new file mode 100644
index 0000000..8f8a518
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/client.csr
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICwDCCAagCAQAwezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRIwEAYDVQQH
+EwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5hIEluYy4xHTAbBgkqhkiG9w0BCQEW
+DnVzZXJAY2llbmEuY29tMRcwFQYDVQQDFA51c2VyQGNpZW5hLmNvbTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgC
+NXXl2VFzKLNNvB9PS6D7ZBsQ5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlG
+MQJUbRD9V/oqszMX4k++iAOKtIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5
++CY9WAicXyy+VEV3zTphZZDROjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tI
+gZb48RS1NPyN/XkCYzl3bv21qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9
+uBLBzcQt+4Rd5pLSfi21WM392Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaAA
+MA0GCSqGSIb3DQEBCwUAA4IBAQB030zqg/C6+0rwf+nsdQJvpUGFVCT3WJRf7Qx5
+NC3n6hfetLHs7XjPZ77CI2B1VEPE7r55Mv1m81b1+2WO/jFQXlM52CteOSLy/Zsj
+lUBW4naaCa+C3liOn1cSONNClvKMGl2DcTbOFO8j9A3dAOHUR05SeAtGutVip9CS
+NPl36MmwFUO0p25UkmG4IJIZPVaMEjqEPVjWxnRFrajFwsbyMkHEFIEvQ/TP1qpN
+LzLmp+Y4flS4O7zC3AAt4Zayr4AC5cf4JKDJxxfZ+qE0KS7jV4bJdo5hxpGz4ECC
+/LDZPZN9oGr67bNSjM4+Ogdx5v4Huojn/lQPK4gTME8SIUPX
+-----END CERTIFICATE REQUEST-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/client.key b/src/test/setup/radius-config/freeradius/certs_2/client.key
new file mode 100644
index 0000000..a2e92c3
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/client.key
@@ -0,0 +1,30 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIY2/Sy6WJDBYCAggA
+MBQGCCqGSIb3DQMHBAgzT2/EDIfXJASCBMjE2epab8bFXVgs1E2P02G/LzexUvvO
+gH9GltVbzVSmsuWNKaGBqWvRNiKrIiGIBQpNZsdV/0ae/5Etw3qkvcO/vTiPAOgC
+3+vkaNZMIpI4jePvvCzeUlnybg4+WBq6aXZLRQc8uCWkApH/HfcnwxCKEKebPqj5
+F1xmzT8WOSRJYytrwzU7GWQtsDwFCv0KnTJeYni9CVLIX8jFLtMB1mRhYZ93eSiM
+DjsSr2OH/AOiZQEzCv5YMbDk4WD9L1MD5S62bpWxdwG/aEr8E1dI1Z2TJHzx75dR
+lWdoV1BQHfKmsQRtwnZ/Hq6zmzY+SStJGFUcRdBdLdJrfRcIyTJXVkFYoVMM/PDl
+UT4K0pIcDILH7jPNp7kuDfSDigFNvqk4O6GybN+TT7cQKH5oGtEsvGSOfUYZUEvp
+KV4rpyR+n3NPC1tEoOvfuGlqHDGN62pdTVhFM/FqFbZSEoTzlSU+OecLiQrGtS6T
+hrmWt/go20MxTlWh87L0s9SRalP4A2YkufHBst8oSgwI4DzVhifqqWD87w7iL0Ur
+6drgbtlM2hY3onkwS2+oSzEIIlwLwaBaAt2hnVosgZIQajcmlayIhRQ1SNsYYj2T
+YTTTYxPWwUaIYzOl7Ri1OoD5dSFY84sUAD7odLMpzmEJQIi31KYIdOs1BN5oDpFV
+GbcKtF7sKw2QBb8nZgADobpCHIJIG/SLNqx4UgSZYgLVUgW0xaS8+8ylVLqRkIjM
+yoGkxqezc2pvCAbH8BMGYaZei5TL9GHanae+t6caBK9Zty6m9bdT9H9EkC6NEWhX
+IuKGZjyq/+O1mFK/66ts+tq9mynyZfVxxAKINijGLEWKPL0KAZkZIfFnCfXO7kK/
+JJNp5zE8GX9FFT5w8sq2UTsfS/F6K8kih+gZVJtj4irnWiABLq4VQjBRPeJJFt5Q
+Zki48dH5JP1/0222mka+ynRfv6pAtSN1Y5vx2mDPNoxiajhfMoLAxFkwwUYA3AfI
+DMTByk7n27HfWtmkUV+Zx263NVkCU0/BjOE3j7N1OojSuCizJRIT199hRhmnTFoy
+FPRrmYF4g/HU1ca6u8If5JzZAfJIqN8H9oHHTdWg5HuU31NpQPHgQqRGvaaBpuwc
+oglzg6mhl/4dUlZiw6l7bJGMojid24iTMgt6FkUqma1ECQ2wp3SF06u7iFecB78B
+aKJhOGOF1GHc0DMHNwLfSw1wIBah5K1SFm4JQyEYrG/KeRGXRKKGlKPGTKQPQRCU
+LCqbWnFMPBW5b/V/Xv02BBemgwp44RsFDQo6AVp6zbzWwh84oLrFSuGMK7aGynGA
+/MzGAmrC5jbIC62WAejlSj51o6sHoQNjn59PULZWqsbfD0DWH1DXeGqzLNd0phx7
+v1yDjLVq7J64YNYtxctZ+G54Pkg5wHTyx+dt3gKi/wVSc/cOHNDC2QxWhvSxL0cp
+/QpgggxaADcPZkvQe2/34wbqBTYbz9j+PODuad8hrqSLMLzX5iIFqE8qOYNPwH8z
+Lz66G4k3kp307/+0pocIRASn9dtX0PgpGyKo1hvg3zYNP+ObRPOT06Zx1HhEIx2S
+7oQXaQNDJpZd5tO+s7RY80ficybUe7wC4BnqNaoxVluBaIEA2NdiPHOiL5Sh0sme
+0oI=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/client.p12 b/src/test/setup/radius-config/freeradius/certs_2/client.p12
new file mode 100644
index 0000000..d1289a9
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/client.p12
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs_2/client.pem b/src/test/setup/radius-config/freeradius/certs_2/client.pem
new file mode 100644
index 0000000..6dc7d9b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/client.pem
@@ -0,0 +1,60 @@
+Bag Attributes
+    localKeyID: 8F EF 1A 9E BD 4E 8D A1 8E 01 DF AF D0 5A 74 29 F5 76 99 DB 
+subject=/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
+issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
+-----BEGIN CERTIFICATE-----
+MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
+MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
+5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
+tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
+OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
+qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
+2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
+eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
+MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
+VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
+RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
+dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
+T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
+yg==
+-----END CERTIFICATE-----
+Bag Attributes
+    localKeyID: 8F EF 1A 9E BD 4E 8D A1 8E 01 DF AF D0 5A 74 29 F5 76 99 DB 
+Key Attributes: <No Attributes>
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQI6qtqDG5BZo0CAggA
+MBQGCCqGSIb3DQMHBAi1fAR8FIi0cgSCBMjz9+X+jT+0Bop2xt6SN0SHb/zqlDKP
+Ca1mvOp0+hbID9/lZ4uh71QhxwFnEp/Vnoz2Shxavb75SQKNBVWuazsiJogrFfTj
+d/3PLlTpyIheFLpBkh95Gt4103ON4wPDh2g6j8hpEJoBy4qxb4C8FrWJ05LYjONI
+Uz+zlAn6v2pWkep/kLijb9hCyBjlVRECdArwL/Y7KzFwiGGJX+FtDABrZKfHZw7l
+ovgPQE/7bSG8/8clJ+1IM1AWTFqxpvnj66lWh/DMfoAvNKH45lpGvkbNy9UHIJsu
+1fYyeGibGSisGuLCvsojt/Idmz2O/zU1suv8/ZVV3vRWe+FpUUZqC1rVTKA+BVyy
+vWiFi3u0tezWNiey0y7ocFxIv5+PDH87cG54xRfwQIgDB4QAdcaJpmA4pkU7puhS
+gv54wcAxLRhQGy4lVOiqBkIVzux0m12L/Jh6Ctl8EfYMrrdQeBZMAVtC/qLgv2O4
+cJWVzD69lveMNOekLswaOzKVxwpe2jFeWf9TsDpV1+r+sYcT3b9xHmo7gHLH/Vu5
+RcIRD6QFBKyt8lvqNUSysDA8GnRcw/AInrfaLz7Nly768gkSjMLd1EByCraX8GaD
+8RPl8thtjqbfoC/j2+UjYQw79QnxOia2K0Ft3aXh40YTMQqXDZuoPR3ajaS4Fyz5
+nxnctUtk0i85p8Ge4V3pMZCC6EZdTrzgKv8GvFJLzmy7vfTbJBapK8LlwUJMWyyv
+aED55kkv2lhjPT/Qy4iz5j9/Q5DgEENEB42x0SDK8fjTGZ8vrIEp7kzPMYuqpFZ8
+XFiwHB4brrpq7gxyedG7FbzWqO+t3xSvgXRYDj7WKMUPZXw7QGvGNci2U7GiPVM1
+vPCpIllgh2ZJ9p1vvf44o6Aoh2i4Hkl24zRTSV5L4c5yu8Eeckj6lJubTSlrfa2d
+aEIyO6SK3jgXJKXl70Xv4jAe5M5JDkK85k9HAXQryceWi8dpuHfNb9akFX8vMymh
+QCd2QzROyAytHGAPgH/55hHIg93ORkpMA9GzsutH9+u14uziSpbr3B3j8uOgKTnN
+U+rrEKP9Wm23efJ/X5IzEfFPeGqG4dZDOn9US/WkwiTNFmy834T89DRT609ckSVD
+AhAlQVOXLE2gp/0i+b2vdT1Br+PRqQOXh0sv9x6uL56tvuUG3nnOWGZSljZmEm5p
+Qa8JSjY3ZDn6KC1FJyk4DTf7U38zTi2Z/+AL8K680zEJ6Uc9HI3wVdd19Cf84RdQ
+T6fD/LkvBAWTHzVxMrxYVPqE/3Sf1AJX0cmzciZhxrCml4RQZ78i98/yJaxo7ql7
+0Q34+KaAXHP1QghjmXBERBcyHgNeV/jUUXsxZA/MCkn7PvtCuHkNqc2ZlmN0hLwz
+ObHOo826krBRSlpn7P3DreEjSxzCxPpUXAn6f6bXhtVVIiy6ITkWWfVoLD1/crTc
+sRZMi/EKkmgfIzgCYt3JzAdoii3D7ebxcvfVThbDguOE78OhKY3n4wyJ+FpUn56j
+VcX6Ckl2hAE9qwRNOi383+35A0iUb+vF7ky3K/xMONog+dwdTRkhDbz6rTlO72Tj
+B9xcVJAbopB90NNfrG7LRD472O+t+53C27tG1kyIOaCvXPmeFwgTcLLVjDAyFzXb
+uGo=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/dh b/src/test/setup/radius-config/freeradius/certs_2/dh
new file mode 100644
index 0000000..e7b4f90
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/dh
@@ -0,0 +1,5 @@
+-----BEGIN DH PARAMETERS-----
+MIGHAoGBAKHERxCGYaLWD6ay09DuGxxs5whd4zFUS1pjA7jEvGwnbISSzGvzRbYi
+ymNeNgzrZhHiWo5GC008yLvUy0qxVMny0x+7xybup+mOv6ITEz+HuhlsBN+Aqc5P
+Oyq7h1qnuy8UiiEP87YcwhCFooQ3I8dCcMT7AVApYex4K81Sck/LAgEC
+-----END DH PARAMETERS-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/fef12f18.0 b/src/test/setup/radius-config/freeradius/certs_2/fef12f18.0
new file mode 120000
index 0000000..e375f5a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/fef12f18.0
@@ -0,0 +1 @@
+ca.pem
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/index.txt b/src/test/setup/radius-config/freeradius/certs_2/index.txt
new file mode 100644
index 0000000..27c2c7a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/index.txt
@@ -0,0 +1,2 @@
+V	170306185335Z		01	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
+V	170306185336Z		02	unknown	/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs_2/index.txt.attr b/src/test/setup/radius-config/freeradius/certs_2/index.txt.attr
new file mode 100644
index 0000000..8f7e63a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/index.txt.attr
@@ -0,0 +1 @@
+unique_subject = yes
diff --git a/src/test/setup/radius-config/freeradius/certs_2/index.txt.attr.old b/src/test/setup/radius-config/freeradius/certs_2/index.txt.attr.old
new file mode 100644
index 0000000..8f7e63a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/index.txt.attr.old
@@ -0,0 +1 @@
+unique_subject = yes
diff --git a/src/test/setup/radius-config/freeradius/certs_2/index.txt.old b/src/test/setup/radius-config/freeradius/certs_2/index.txt.old
new file mode 100644
index 0000000..f0ce0ce
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/index.txt.old
@@ -0,0 +1 @@
+V	170306185335Z		01	unknown	/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
diff --git a/src/test/setup/radius-config/freeradius/certs_2/random b/src/test/setup/radius-config/freeradius/certs_2/random
new file mode 120000
index 0000000..a222f14
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/random
@@ -0,0 +1 @@
+/dev/urandom
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/certs_2/serial b/src/test/setup/radius-config/freeradius/certs_2/serial
new file mode 100644
index 0000000..75016ea
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/serial
@@ -0,0 +1 @@
+03
diff --git a/src/test/setup/radius-config/freeradius/certs_2/serial.old b/src/test/setup/radius-config/freeradius/certs_2/serial.old
new file mode 100644
index 0000000..9e22bcb
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/serial.old
@@ -0,0 +1 @@
+02
diff --git a/src/test/setup/radius-config/freeradius/certs_2/server.cnf b/src/test/setup/radius-config/freeradius/certs_2/server.cnf
new file mode 100644
index 0000000..444372d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/server.cnf
@@ -0,0 +1,54 @@
+[ ca ]
+default_ca		= CA_default
+
+[ CA_default ]
+dir			= ./
+certs			= $dir
+crl_dir			= $dir/crl
+database		= $dir/index.txt
+new_certs_dir		= $dir
+certificate		= $dir/server.pem
+serial			= $dir/serial
+crl			= $dir/crl.pem
+private_key		= $dir/server.key
+RANDFILE		= $dir/.rand
+name_opt		= ca_default
+cert_opt		= ca_default
+default_days		= 360
+default_crl_days	= 300
+default_md		= sha1
+preserve		= no
+policy			= policy_match
+
+[ policy_match ]
+countryName		= match
+stateOrProvinceName	= match
+organizationName	= match
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+[ policy_anything ]
+countryName		= optional
+stateOrProvinceName	= optional
+localityName		= optional
+organizationName	= optional
+organizationalUnitName	= optional
+commonName		= supplied
+emailAddress		= optional
+
+[ req ]
+prompt			= no
+distinguished_name	= server
+default_bits		= 2048
+input_password		= whatever
+output_password		= whatever
+
+[server]
+countryName		= US
+stateOrProvinceName	= CA
+localityName		= Somewhere
+organizationName	= Ciena Inc.
+emailAddress		= admin@ciena.com
+commonName		= "Example Server Certificate"
+
diff --git a/src/test/setup/radius-config/freeradius/certs_2/server.crt b/src/test/setup/radius-config/freeradius/certs_2/server.crt
new file mode 100644
index 0000000..246df1b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/server.crt
@@ -0,0 +1,80 @@
+Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number: 1 (0x1)
+    Signature Algorithm: sha1WithRSAEncryption
+        Issuer: C=US, ST=CA, L=Somewhere, O=Ciena Inc./emailAddress=admin@ciena.com, CN=Example Certificate Authority
+        Validity
+            Not Before: Mar 11 18:53:35 2016 GMT
+            Not After : Mar  6 18:53:35 2017 GMT
+        Subject: C=US, ST=CA, O=Ciena Inc., CN=Example Server Certificate/emailAddress=admin@ciena.com
+        Subject Public Key Info:
+            Public Key Algorithm: rsaEncryption
+                Public-Key: (2048 bit)
+                Modulus:
+                    00:a7:9b:3d:b2:8f:6d:dd:55:c7:34:5a:8b:c7:78:
+                    a8:ff:14:fa:21:0e:60:1b:0c:87:36:f1:07:3a:cc:
+                    f1:8a:9d:23:4c:31:8d:81:92:0b:1f:b2:f9:6f:55:
+                    79:c3:fd:18:8f:99:a7:8b:8c:41:18:a6:02:08:cb:
+                    b5:5b:8b:b7:23:a3:6d:20:a9:ec:ee:bf:fa:f1:99:
+                    d7:07:35:a1:3b:e9:5e:b7:84:8a:db:5d:46:15:3e:
+                    1f:92:2d:12:db:4c:c3:aa:13:c7:dd:2d:a0:0a:d2:
+                    3c:59:19:fa:7c:d9:a5:b4:16:bd:82:ba:35:47:c4:
+                    dc:fb:af:61:f1:70:d8:b3:2c:ef:91:20:c5:d5:af:
+                    b7:ac:5d:15:4e:ea:64:ab:0b:b3:ee:25:7e:aa:a8:
+                    a0:a5:36:2e:59:ed:b8:c7:02:4f:ab:9b:e7:50:4c:
+                    30:14:4d:48:1a:a2:88:05:6e:7e:82:ef:f8:c5:70:
+                    b5:d8:3e:ae:f6:e0:2e:68:ba:52:d3:e5:3a:2d:0f:
+                    dd:43:86:39:b5:af:5b:c3:86:7b:98:78:7f:d5:9b:
+                    ee:9f:e4:50:5e:03:9e:29:67:f5:78:35:b1:d3:e2:
+                    66:2d:68:36:c2:30:c9:06:c2:1c:73:9b:c3:09:28:
+                    ba:08:b8:f5:49:e0:5b:d1:43:d9:38:06:47:32:a2:
+                    de:a9
+                Exponent: 65537 (0x10001)
+        X509v3 extensions:
+            X509v3 Extended Key Usage: 
+                TLS Web Server Authentication
+            X509v3 CRL Distribution Points: 
+
+                Full Name:
+                  URI:http://www.example.com/example_ca.crl
+
+    Signature Algorithm: sha1WithRSAEncryption
+         58:cd:50:ef:5f:b1:3e:34:a2:5d:f4:59:f6:11:25:be:de:b2:
+         f8:58:81:2a:89:f0:e7:df:36:88:49:8e:d2:c4:44:22:6c:40:
+         a4:13:6c:8c:15:9d:f2:9e:32:29:ff:31:f0:82:92:3e:93:58:
+         ce:eb:da:9b:19:76:7d:de:54:c5:b1:b2:2e:66:4c:7b:7c:9c:
+         98:12:f9:20:aa:d3:c7:d3:0b:70:5a:c3:24:d7:b9:92:8e:38:
+         fe:54:21:c6:c9:e4:c8:b8:b6:ae:8a:0e:2d:18:95:53:da:b6:
+         9b:94:12:0d:68:e8:ef:0a:78:8b:29:cc:0f:59:a4:d8:dc:6c:
+         34:b2:7a:6f:de:63:1e:e1:03:d2:f3:ca:b6:26:05:f4:22:51:
+         2a:ff:78:d1:07:b3:e7:7e:ab:68:33:2a:0a:d0:cc:be:26:ea:
+         6a:6e:10:d7:2b:7a:7b:cb:e6:0d:50:66:7f:9a:33:31:ad:8d:
+         1b:3b:3f:8e:74:29:3d:07:37:9d:4d:29:ad:b6:cc:84:d8:1c:
+         09:48:61:ce:67:30:ee:74:25:fe:23:5a:8d:00:f6:1a:5d:de:
+         04:70:a7:ea:e9:6f:b0:25:10:f4:3a:70:ab:5a:57:5f:53:12:
+         d8:0e:52:f4:f2:f5:dc:25:71:e5:46:24:09:65:95:22:1b:35:
+         8b:78:9b:6f
+-----BEGIN CERTIFICATE-----
+MIIDyjCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM1WhcN
+MTcwMzA2MTg1MzM1WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
+YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCnmz2yj23dVcc0WovHeKj/FPohDmAbDIc28Qc6
+zPGKnSNMMY2BkgsfsvlvVXnD/RiPmaeLjEEYpgIIy7Vbi7cjo20gqezuv/rxmdcH
+NaE76V63hIrbXUYVPh+SLRLbTMOqE8fdLaAK0jxZGfp82aW0Fr2CujVHxNz7r2Hx
+cNizLO+RIMXVr7esXRVO6mSrC7PuJX6qqKClNi5Z7bjHAk+rm+dQTDAUTUgaoogF
+bn6C7/jFcLXYPq724C5oulLT5TotD91Dhjm1r1vDhnuYeH/Vm+6f5FBeA54pZ/V4
+NbHT4mYtaDbCMMkGwhxzm8MJKLoIuPVJ4FvRQ9k4Bkcyot6pAgMBAAGjTzBNMBMG
+A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
+ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAFjN
+UO9fsT40ol30WfYRJb7esvhYgSqJ8OffNohJjtLERCJsQKQTbIwVnfKeMin/MfCC
+kj6TWM7r2psZdn3eVMWxsi5mTHt8nJgS+SCq08fTC3BawyTXuZKOOP5UIcbJ5Mi4
+tq6KDi0YlVPatpuUEg1o6O8KeIspzA9ZpNjcbDSyem/eYx7hA9LzyrYmBfQiUSr/
+eNEHs+d+q2gzKgrQzL4m6mpuENcrenvL5g1QZn+aMzGtjRs7P450KT0HN51NKa22
+zITYHAlIYc5nMO50Jf4jWo0A9hpd3gRwp+rpb7AlEPQ6cKtaV19TEtgOUvTy9dwl
+ceVGJAlllSIbNYt4m28=
+-----END CERTIFICATE-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/server.csr b/src/test/setup/radius-config/freeradius/certs_2/server.csr
new file mode 100644
index 0000000..d055b9e
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/server.csr
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIICzjCCAbYCAQAwgYgxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UE
+BxMJU29tZXdoZXJlMRMwEQYDVQQKEwpDaWVuYSBJbmMuMR4wHAYJKoZIhvcNAQkB
+Fg9hZG1pbkBjaWVuYS5jb20xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRp
+ZmljYXRlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp5s9so9t3VXH
+NFqLx3io/xT6IQ5gGwyHNvEHOszxip0jTDGNgZILH7L5b1V5w/0Yj5mni4xBGKYC
+CMu1W4u3I6NtIKns7r/68ZnXBzWhO+let4SK211GFT4fki0S20zDqhPH3S2gCtI8
+WRn6fNmltBa9gro1R8Tc+69h8XDYsyzvkSDF1a+3rF0VTupkqwuz7iV+qqigpTYu
+We24xwJPq5vnUEwwFE1IGqKIBW5+gu/4xXC12D6u9uAuaLpS0+U6LQ/dQ4Y5ta9b
+w4Z7mHh/1Zvun+RQXgOeKWf1eDWx0+JmLWg2wjDJBsIcc5vDCSi6CLj1SeBb0UPZ
+OAZHMqLeqQIDAQABoAAwDQYJKoZIhvcNAQELBQADggEBAI4JSSggafFTzjYU4n9s
+lKYCCy8+MQ2X9eEKcsBwDiPvXmJdeWJTFYfBHE5p6spyA3IULxb9N90Kajdf287L
+e1Gurh4XuLd2gv/UAR4fpYJ6r0jJviWAe77R1cuJ+QvQWOaSWzJmxtZkO7OdBl0A
+XmksiRLnKu0mGEcGSQQ3vfdwDTGqpfLuSn9U6B8FoK7AjkeB1oKf6UgHnbN01UKp
+ubExjX4lNHLLiNrXjBkyDpW5zBbbhEaPdnDLHvNnd6fTkd7F3Jt5timmrm9hKMMB
+hE7qLyiBoSdqFejZEPjcvJGV42sNetREqIrWnvsXrox+7P+5z8+uowebLXGohfJC
+hmc=
+-----END CERTIFICATE REQUEST-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/server.key b/src/test/setup/radius-config/freeradius/certs_2/server.key
new file mode 100644
index 0000000..63be1bd
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/server.key
@@ -0,0 +1,30 @@
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQI9q05vTmS4WYCAggA
+MBQGCCqGSIb3DQMHBAi4iVH6BL89ZQSCBMiK+P9gWMj1xFZqEQZ/VUTB0bt6YM8q
+nc1otC9KdWVCl5zqdV3vC0BdLMGv3Xem+u/ycWyrSsqDD3RzFqIjIxJTsAQRMKwr
+Fu4sNBMyAh0TCzVtf73QjiRg8Jtkf4UbTfJzNo1J3rjglnkSJ+9rCAYT4Ei84opN
+T/pdlhw9uRRsz7v+HRzajcpyw6FxtjLOUI2PaG8Lyrlrgt6uP1PvurK2+jexZ8o6
+OWIo5kbrn/rpzTiHWNgRoWnT71J5/lXE8hkjtv/5WAuncPAaUVdo0nKg58RD66St
+MOfQKlISeOdNw0yUWNPKkr98Tnp+fSUFHV4NCpMoV7mgab16grd8XR4qnOYuq8Ay
+9m0kzvffeASJj9hmpRDrZGrPXijNCRtEE8WQv3tLAYRaH180m6qCr7cOCS89LZZ4
+sVEIiAsOgCuAX3E3PGrdFbsGR6MnRpoHNxtUkD5g/b//8HTJ7b0EMKp00VTuHQRH
+JxxTZnbPSmsHJ+RmKL1K3eHqCDXuTPVFdDh82mabd/EiSdfj13+8etMQrF62XhDw
+r/2ElsO1yIPkXg9+FuC67EIBkYEbpuCXkvqYeuYEskEtoSDCj5yoX/aNJUkVImA3
+zveRCH8GMD0kaIf9IQdQ1jJxUGc3ZWFo6MIFAUD5eGXfwWX1x11sFJP2uBdf+31A
+0GhFICUaziHcDrHtqp5/nzo8f0hh+y3zXLx/Mf+WMC0Nirh7nyMoEmeNufYZtdvI
+5u90rYiPr7yS8vQD1R0LQZnODmtx0akn9HAtFvGzFbfa6x+2RoPpDiKS43ZCQPeW
+8JhWakNKijzfl2vufVUtSDZ5cPg5oyTH2NMw+DAgxqowtPmYV9J+ecZ9akwKk1Uz
+cLpNPrDmdUCyfztU5tlfTIdduafj9eIIgvVZs9wajlEWvooMW7cwbKYA0I5wYdq2
+lqFvnJtngUuvykYPFLg/ME+bXbdmQ6M91HpxOqUKp1feX4TW6yDlStpA40vPO3iB
+HmfL1DW3O4JTmvBwdoLPYoL5vP3/st51vXMXUcnyjHAzCa4HXj80PWyBsCM6S/iT
+SJtieMXSLw7R30D5boXncQS/fBCsdJpEpz2GyjJUn2RLbYJ3OsQbXB0eCaL7y9LL
+hGVK5Ez/HWjZ7Q6WRotVjeO5yRIgzWe4VRV58CVOH2CIkf1ODolzhREyzSBCGD6Q
+5rOZSAd21aStrNWQ02nYPXZbcnTo1LQImonSQ4SJZg0lsRSHfahmXkKafyYg5U8E
+jiff1uzSWWtmSZkY46S4dzQOZsY97k8cChliSnY1Jk8mh/5D9ehLxalUNMv0DIN/
+yTgYmC1TasTdchkSZdEyli8xvGWcmMKC+A5ycfRyE2mPxuEL6nQq4MAH7Yie9g7T
+Fzamniy0SXT08yXu2oFhi7VLyxSbGvIBQqE06rh2NVgt+N1eRSa/SJlkB6iqEmEA
+X+4b3D3s+ST6bZ19b6PP1t4tbfpGZ3LGezndpY4GqgfsUi5hdQcdfRjknCyFRZKm
+Qqi43ojk1xsdUHt/q0Y4RFHMtR5oQTapRXybQBRbzS7KCiRsH356ACowvV0UCNg2
+WzfFm3uozQO6NJCfWePdkfVrxU0p4q9s9QxxDX5SApQpqcwt0rJiDOzXvxKH8jx6
+qHo=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/server.p12 b/src/test/setup/radius-config/freeradius/certs_2/server.p12
new file mode 100644
index 0000000..352d346
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/server.p12
Binary files differ
diff --git a/src/test/setup/radius-config/freeradius/certs_2/server.pem b/src/test/setup/radius-config/freeradius/certs_2/server.pem
new file mode 100644
index 0000000..b8b70f5
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/server.pem
@@ -0,0 +1,60 @@
+Bag Attributes
+    localKeyID: 59 6E 27 68 23 9E 59 B9 D8 DD B4 FC 7B 78 89 F7 50 02 A7 A7 
+subject=/C=US/ST=CA/O=Ciena Inc./CN=Example Server Certificate/emailAddress=admin@ciena.com
+issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
+-----BEGIN CERTIFICATE-----
+MIIDyjCCArKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM1WhcN
+MTcwMzA2MTg1MzM1WjB0MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xIzAhBgNVBAMTGkV4YW1wbGUgU2VydmVyIENlcnRpZmlj
+YXRlMR4wHAYJKoZIhvcNAQkBFg9hZG1pbkBjaWVuYS5jb20wggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCnmz2yj23dVcc0WovHeKj/FPohDmAbDIc28Qc6
+zPGKnSNMMY2BkgsfsvlvVXnD/RiPmaeLjEEYpgIIy7Vbi7cjo20gqezuv/rxmdcH
+NaE76V63hIrbXUYVPh+SLRLbTMOqE8fdLaAK0jxZGfp82aW0Fr2CujVHxNz7r2Hx
+cNizLO+RIMXVr7esXRVO6mSrC7PuJX6qqKClNi5Z7bjHAk+rm+dQTDAUTUgaoogF
+bn6C7/jFcLXYPq724C5oulLT5TotD91Dhjm1r1vDhnuYeH/Vm+6f5FBeA54pZ/V4
+NbHT4mYtaDbCMMkGwhxzm8MJKLoIuPVJ4FvRQ9k4Bkcyot6pAgMBAAGjTzBNMBMG
+A1UdJQQMMAoGCCsGAQUFBwMBMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cu
+ZXhhbXBsZS5jb20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEBAFjN
+UO9fsT40ol30WfYRJb7esvhYgSqJ8OffNohJjtLERCJsQKQTbIwVnfKeMin/MfCC
+kj6TWM7r2psZdn3eVMWxsi5mTHt8nJgS+SCq08fTC3BawyTXuZKOOP5UIcbJ5Mi4
+tq6KDi0YlVPatpuUEg1o6O8KeIspzA9ZpNjcbDSyem/eYx7hA9LzyrYmBfQiUSr/
+eNEHs+d+q2gzKgrQzL4m6mpuENcrenvL5g1QZn+aMzGtjRs7P450KT0HN51NKa22
+zITYHAlIYc5nMO50Jf4jWo0A9hpd3gRwp+rpb7AlEPQ6cKtaV19TEtgOUvTy9dwl
+ceVGJAlllSIbNYt4m28=
+-----END CERTIFICATE-----
+Bag Attributes
+    localKeyID: 59 6E 27 68 23 9E 59 B9 D8 DD B4 FC 7B 78 89 F7 50 02 A7 A7 
+Key Attributes: <No Attributes>
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQIQUZafKqU+44CAggA
+MBQGCCqGSIb3DQMHBAhxfosFDCxaJwSCBMhbvtkYjR1vowEixVQS2J5vL4EhHv7x
+ImI1vnLIbjY6HmUGuOolLidZQ7ieG1hrElat3gPCgw7zfmZyS3DOnLTxBAZRlOxK
+3zkBnegVGNbOnnsLJJX52JSDJvGnlUPWg3r8UhFp3aPH1eqUyt0bTySgpsSdt3yD
+/oWymM1bQW65KfKW3cskR+oKyqjh4rQevyTf5dR2r4noVfR96RqdJWJ95ag40GXN
+gpLDBTZD+iuMN1PiH9CraJWbRIWQOM0ieC79wGZ57V5tzgHENNey2itgwJ93iCj0
+Ay4f0HUEOuqJ7kK1fYEo+MUBt5TzpLBygGIVgbusz57C6DgCHwhBFtLS952GkmEP
+CAKM9I7wWR3w0Mj5maz4kq2hSzou1j81+ivxSkXMEGsCfwbrjY1QIitZdeEu31ti
+uf9+Jx2tK2yIu7+MLnMnpB7vdXrrPT6wipGMBe8a1/sczE2/foW0e2VarQIuS8bt
+fVpnfXT91Mf0DVn6Bc+ZI3EMG555Ah7GqbVztAlRm6IpbpFyFixx8m6oBwYc/ik6
+fReFzBOq+hV9VPAwYkzGlR+6hhfxhCUyE89LmB2z+fJvEMRj+X8LG21bHTkJoymp
+E/a4NIvOZv1vE3PpK7quZDm9HT/hdTsXbqlfbIBPcpJyUSbTcdBX2jcXfTz0od8Z
+e1iNlQ93d8FHuZFbtYiiZRSWGHPXI3sc96qY12cbUftZy20eN2esn37l13mDi0uS
+Qn0lAQFQwnEF4RROSSoLJefXc9kNXxq2cgZ/rWuUerwQQfMWU5tPwDS5UEoJjQg3
+eK2GH8YMoUuS178X9IU8cXD6vFkSOQ4uZ7L8sY7YHxqo8FeKW+YA7j5U8aNkVC3X
+crlV7VAbfd5k8NDaNe39dM8YNfJre6yBF8Wbvh6HV2a2JgzeQHQPXqLIKC27MCCY
+67P/IHmTis5Yz/tDKwO19N463VrDC6wno6fQdeNe5j3j29/y3YAkJweUtvXCYYJ6
+MOBh5hM+jMJWNSnfERUhjzp+FDoVzZgcxZ8OKbkOr6QZo3WBC7ogoJAVIaNS9Kl+
+RXUhdEd2uoYzwcNEmE9EqRTs8+Yy4VlgPS2iHWy+lboa+1Fi4yAZzeTmAd/BLQNB
+kLUI4OzniBtHn0C4oHz+Lfkm24t5iR5pxIWhNnOOxS0gkObtyWPlcC3LXYZ85ude
+mR8265l5FP9jabzfnCfoZWtdnIBUNcwAcB5oCdChipfJobXrmjyp5W8Sw8enr0BU
+ZJ2MwTGufoeQ3t2IsybY82TuXB6aLegevH3xC4kJV3We83LcUxNhkqmycU935ew0
+cJVQO8C3J5U4Pha8tn1+mKvDaKcv4HmG0YZyN48tdOtR1y4+Xzhq9hSwKl+rzG1Y
+TP0mW1fNfHRDrbykxkIpAXay9kDtfafalMI3ShndZfYiYJBe8IB+m9NML/lEPQyC
+fHH3xPNixHu74a71b6xgMZFhrrXBikfMUB1qroWa+9ocy/5LvdfCRIQN+ti7Tb4F
+FH5qzP/qAfjEdejuIdHHKNs/wkhTixqi8QCkDWEXkDj8AsiVmiBva6luSuQ31OiT
+ERZmRhkZfpkKmo4Jgc12dNsOqXYPF2KJ16bSElfuY5PGYR8JEw9Tz1k1UaMmrOGR
+guU=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/user@ciena.com.pem b/src/test/setup/radius-config/freeradius/certs_2/user@ciena.com.pem
new file mode 100644
index 0000000..6dc7d9b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/user@ciena.com.pem
@@ -0,0 +1,60 @@
+Bag Attributes
+    localKeyID: 8F EF 1A 9E BD 4E 8D A1 8E 01 DF AF D0 5A 74 29 F5 76 99 DB 
+subject=/C=US/ST=CA/O=Ciena Inc./CN=user@ciena.com/emailAddress=user@ciena.com
+issuer=/C=US/ST=CA/L=Somewhere/O=Ciena Inc./emailAddress=admin@ciena.com/CN=Example Certificate Authority
+-----BEGIN CERTIFICATE-----
+MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
+MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
+5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
+tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
+OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
+qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
+2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
+eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
+MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
+VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
+RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
+dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
+T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
+yg==
+-----END CERTIFICATE-----
+Bag Attributes
+    localKeyID: 8F EF 1A 9E BD 4E 8D A1 8E 01 DF AF D0 5A 74 29 F5 76 99 DB 
+Key Attributes: <No Attributes>
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFDjBABgkqhkiG9w0BBQ0wMzAbBgkqhkiG9w0BBQwwDgQI6qtqDG5BZo0CAggA
+MBQGCCqGSIb3DQMHBAi1fAR8FIi0cgSCBMjz9+X+jT+0Bop2xt6SN0SHb/zqlDKP
+Ca1mvOp0+hbID9/lZ4uh71QhxwFnEp/Vnoz2Shxavb75SQKNBVWuazsiJogrFfTj
+d/3PLlTpyIheFLpBkh95Gt4103ON4wPDh2g6j8hpEJoBy4qxb4C8FrWJ05LYjONI
+Uz+zlAn6v2pWkep/kLijb9hCyBjlVRECdArwL/Y7KzFwiGGJX+FtDABrZKfHZw7l
+ovgPQE/7bSG8/8clJ+1IM1AWTFqxpvnj66lWh/DMfoAvNKH45lpGvkbNy9UHIJsu
+1fYyeGibGSisGuLCvsojt/Idmz2O/zU1suv8/ZVV3vRWe+FpUUZqC1rVTKA+BVyy
+vWiFi3u0tezWNiey0y7ocFxIv5+PDH87cG54xRfwQIgDB4QAdcaJpmA4pkU7puhS
+gv54wcAxLRhQGy4lVOiqBkIVzux0m12L/Jh6Ctl8EfYMrrdQeBZMAVtC/qLgv2O4
+cJWVzD69lveMNOekLswaOzKVxwpe2jFeWf9TsDpV1+r+sYcT3b9xHmo7gHLH/Vu5
+RcIRD6QFBKyt8lvqNUSysDA8GnRcw/AInrfaLz7Nly768gkSjMLd1EByCraX8GaD
+8RPl8thtjqbfoC/j2+UjYQw79QnxOia2K0Ft3aXh40YTMQqXDZuoPR3ajaS4Fyz5
+nxnctUtk0i85p8Ge4V3pMZCC6EZdTrzgKv8GvFJLzmy7vfTbJBapK8LlwUJMWyyv
+aED55kkv2lhjPT/Qy4iz5j9/Q5DgEENEB42x0SDK8fjTGZ8vrIEp7kzPMYuqpFZ8
+XFiwHB4brrpq7gxyedG7FbzWqO+t3xSvgXRYDj7WKMUPZXw7QGvGNci2U7GiPVM1
+vPCpIllgh2ZJ9p1vvf44o6Aoh2i4Hkl24zRTSV5L4c5yu8Eeckj6lJubTSlrfa2d
+aEIyO6SK3jgXJKXl70Xv4jAe5M5JDkK85k9HAXQryceWi8dpuHfNb9akFX8vMymh
+QCd2QzROyAytHGAPgH/55hHIg93ORkpMA9GzsutH9+u14uziSpbr3B3j8uOgKTnN
+U+rrEKP9Wm23efJ/X5IzEfFPeGqG4dZDOn9US/WkwiTNFmy834T89DRT609ckSVD
+AhAlQVOXLE2gp/0i+b2vdT1Br+PRqQOXh0sv9x6uL56tvuUG3nnOWGZSljZmEm5p
+Qa8JSjY3ZDn6KC1FJyk4DTf7U38zTi2Z/+AL8K680zEJ6Uc9HI3wVdd19Cf84RdQ
+T6fD/LkvBAWTHzVxMrxYVPqE/3Sf1AJX0cmzciZhxrCml4RQZ78i98/yJaxo7ql7
+0Q34+KaAXHP1QghjmXBERBcyHgNeV/jUUXsxZA/MCkn7PvtCuHkNqc2ZlmN0hLwz
+ObHOo826krBRSlpn7P3DreEjSxzCxPpUXAn6f6bXhtVVIiy6ITkWWfVoLD1/crTc
+sRZMi/EKkmgfIzgCYt3JzAdoii3D7ebxcvfVThbDguOE78OhKY3n4wyJ+FpUn56j
+VcX6Ckl2hAE9qwRNOi383+35A0iUb+vF7ky3K/xMONog+dwdTRkhDbz6rTlO72Tj
+B9xcVJAbopB90NNfrG7LRD472O+t+53C27tG1kyIOaCvXPmeFwgTcLLVjDAyFzXb
+uGo=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/src/test/setup/radius-config/freeradius/certs_2/xpextensions b/src/test/setup/radius-config/freeradius/certs_2/xpextensions
new file mode 100644
index 0000000..8e4a9a2
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/certs_2/xpextensions
@@ -0,0 +1,24 @@
+#
+#  File containing the OIDs required for Windows.
+#
+#  http://support.microsoft.com/kb/814394/en-us
+#
+[ xpclient_ext]
+extendedKeyUsage = 1.3.6.1.5.5.7.3.2
+crlDistributionPoints = URI:http://www.example.com/example_ca.crl
+
+[ xpserver_ext]
+extendedKeyUsage = 1.3.6.1.5.5.7.3.1
+crlDistributionPoints = URI:http://www.example.com/example_ca.crl
+
+#
+#  Add this to the PKCS#7 keybag attributes holding the client's private key
+#  for machine authentication.
+#
+#  the presence of this OID tells Windows XP that the cert is intended
+#  for use by the computer itself, and not by an end-user.
+#
+#  The other solution is to use Microsoft's web certificate server
+#  to generate these certs.
+#
+# 1.3.6.1.4.1.311.17.2
diff --git a/src/test/setup/radius-config/freeradius/clients.conf b/src/test/setup/radius-config/freeradius/clients.conf
new file mode 100644
index 0000000..ea41823
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/clients.conf
@@ -0,0 +1,1626 @@
+# -*- text -*-
+##
+## clients.conf -- client configuration directives
+##
+##	$Id: 81f450102d9f1a3bc72264ab8d06543591fcab98 $
+
+#######################################################################
+#
+#  Define RADIUS clients (usually a NAS, Access Point, etc.).
+
+#
+#  Defines a RADIUS client.
+#
+#  '127.0.0.1' is another name for 'localhost'.  It is enabled by default,
+#  to allow testing of the server after an initial installation.  If you
+#  are not going to be permitting RADIUS queries from localhost, we suggest
+#  that you delete, or comment out, this entry.
+#
+#
+
+#
+#  Each client has a "short name" that is used to distinguish it from
+#  other clients.
+#
+#  In version 1.x, the string after the word "client" was the IP
+#  address of the client.  In 2.0, the IP address is configured via
+#  the "ipaddr" or "ipv6addr" fields.  For compatibility, the 1.x
+#  format is still accepted.
+#
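+#
+#  For example, a 2.0-style entry looks like the following (illustrative
+#  only; the name, address, and secret here are placeholders, not values
+#  used by this file):
+#
+#	client localhost {
+#		ipaddr = 127.0.0.1
+#		secret = testing123
+#	}
+#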
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password 
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password 
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client ipv6{
+	ipv6addr = ::
+	secret = radius_password radius_password
+}
+client 0.0.0.0/0{
+	secret = radius_password radius_password
+}
+client localhost {
+	#  Allowed values are:
+	#	dotted quad (1.2.3.4)
+	#	   hostname	(radius.example.com)
+	ipaddr = 127.0.0.1
+
+	#  OR, you can use an IPv6 address, but not both
+	#  at the same time.
+#	ipv6addr = ::	# any.  ::1 == localhost
+
+	#
+	#  The transport protocol.
+	#
+	#  If unspecified, defaults to "udp", which is the traditional
+	#  RADIUS transport.  It may also be "tcp", in which case the
+	#  server will accept connections from this client ONLY over TCP.
+	#
+	proto = *
+
+	#
+	#  A note on DNS:  We STRONGLY recommend using IP addresses
+	#  rather than host names.  Using host names means that the
+	#  server will do DNS lookups when it starts, making it
+	#  dependent on DNS.  i.e. If anything goes wrong with DNS,
+	#  the server won't start!
+	#
+	#  The server also looks up the IP address from DNS once, and
+	#  only once, when it starts.  If the DNS record is later
+	#  updated, the server WILL NOT see that update.
+	#
+
+	#  One client definition can be applied to an entire network.
+	#  e.g. 127/8 should be defined with "ipaddr = 127.0.0.0" and
+	#  "netmask = 8"
+	#
+	#  If not specified, the default netmask is 32 (i.e. /32)
+	#
+	#  We do NOT recommend using anything other than 32.  There
+	#  are usually other, better ways to achieve the same goal.
+	#  Using netmasks of other than 32 can cause security issues.
+	#
+	#  You can specify overlapping networks (127/8 and 127.0/16)
+	#  In that case, the smallest possible network will be used
+	#  as the "best match" for the client.
+	#
+	#  Clients can also be defined dynamically at run time, based
+	#  on any criteria.  e.g. SQL lookups, keying off of NAS-Identifier,
+	#  etc.
+	#  See raddb/sites-available/dynamic-clients for details.
+	#
+
+#	netmask = 32
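+
+	#  For example, a single (illustrative) definition covering a whole
+	#  management network might look like:
+	#
+	#	client 192.0.2.0/24 {
+	#		secret		= radius_password
+	#		shortname	= private-network
+	#	}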
+
+	#
+	#  The shared secret used to "encrypt" and "sign" packets between
+	#  the NAS and FreeRADIUS.  You MUST change this secret from the
+	#  default, otherwise it's not a secret any more!
+	#
+	#  The secret can be any string, up to 8k characters in length.
+	#
+	#  Control codes can be entered via octal encoding,
+	#	e.g. "\101\102" == "AB"
+	#  Quotation marks can be entered by escaping them,
+	#	e.g. "foo\"bar"
+	#
+	#  A note on security:  The security of the RADIUS protocol
+	#  depends COMPLETELY on this secret!  We recommend using a
+	#  shared secret that is composed of:
+	#
+	#	upper case letters
+	#	lower case letters
+	#	numbers
+	#
+	#  And is at LEAST 8 characters long, preferably 16 characters in
+	#  length.  The secret MUST be random, and should not be words,
+	#  phrases, or anything else that is recognisable.
+	#
+	#  The default secret below is only for testing, and should
+	#  not be used in any real environment.
+	#
+	secret = radius_password
+
+	#
+	#  Old-style clients do not send a Message-Authenticator
+	#  in an Access-Request.  RFC 5080 suggests that all clients
+	#  SHOULD include it in an Access-Request.  The configuration
+	#  item below allows the server to require it.  If a client
+	#  is required to include a Message-Authenticator and it does
+	#  not, then the packet will be silently discarded.
+	#
+	#  allowed values: yes, no
+	require_message_authenticator = no
+
+	#
+	#  The short name is used as an alias for the fully qualified
+	#  domain name, or the IP address.
+	#
+	#  It is accepted for compatibility with 1.x, but it is no
+	#  longer necessary in 2.0
+	#
+#	shortname = localhost
+
+	#
+	# the following three fields are optional, but may be used by
+	# checkrad.pl for simultaneous use checks
+	#
+
+	#
+	# The nas_type tells 'checkrad.pl' which NAS-specific method to
+	#  use to query the NAS for simultaneous use.
+	#
+	#  Permitted NAS types are:
+	#
+	#	cisco
+	#	computone
+	#	livingston
+	#	juniper
+	#	max40xx
+	#	multitech
+	#	netserver
+	#	pathras
+	#	patton
+	#	portslave
+	#	tc
+	#	usrhiper
+	#	other		# for all other types
+
+	#
+	nas_type	 = other	# localhost isn't usually a NAS...
+
+	#
+	#  The following two configurations are for future use.
+	#  The 'naspasswd' file is currently used to store the NAS
+	#  login name and password, which is used by checkrad.pl
+	#  when querying the NAS for simultaneous use.
+	#
+#	login	   = !root
+#	password	= someadminpas
+
+	#
+	#  As of 2.0, clients can also be tied to a virtual server.
+	#  This is done by setting the "virtual_server" configuration
+	#  item, as in the example below.
+	#
+#	virtual_server = home1
+
+	#
+	#  A pointer to the "home_server_pool" OR a "home_server"
+	#  section that contains the CoA configuration for this
+	#  client.  For an example of a coa home server or pool,
+	#  see raddb/sites-available/originate-coa
+#	coa_server = coa
+
+	#
+	#  Connection limiting for clients using "proto = tcp".
+	#
+	#  This section is ignored for clients sending UDP traffic
+	#
+	limit {
+		#
+		#  Limit the number of simultaneous TCP connections from a client
+		#
+		#  The default is 16.
+		#  Setting this to 0 means "no limit"
+		max_connections = 16
+
+		#  The per-socket "max_requests" option does not exist.
+
+		#
+		#  The lifetime, in seconds, of a TCP connection.  After
+		#  this lifetime, the connection will be closed.
+		#
+		#  Setting this to 0 means "forever".
+		lifetime = 0
+
+		#
+		#  The idle timeout, in seconds, of a TCP connection.
+		#  If no packets have been received over the connection for
+		#  this time, the connection will be closed.
+		#
+		#  Setting this to 0 means "no timeout".
+		#
+		#  We STRONGLY RECOMMEND that you set an idle timeout.
+		#
+		idle_timeout = 30
+	}
+}
+
+# IPv6 Client
+#client ::1 {
+#	secret		= 
+#	shortname	= localhost
+#}
+#
+# All IPv6 Site-local clients
+#client fe80::/16 {
+#	secret		= 
+#	shortname	= localhost
+#}
+
+#client some.host.org {
+#	secret		= 
+#	shortname	= localhost
+#}
+
+#
+#  You can now specify one secret for a network of clients.
+#  When a client request comes in, the BEST match is chosen.
+#  i.e. The entry from the smallest possible network.
+#
+#client 192.0.2.0/24 {
+#	secret		= -1
+#	shortname	= private-network-1
+#}
+#
+#client 198.51.100.0/24 {
+#	secret		= -2
+#	shortname	= private-network-2
+#}
+
+
+#client 203.0.113.1 {
+#	# secret and password are mapped through the "secrets" file.
+#	secret		= 
+#	shortname	= liv1
+#}
+
+client 172.17.0.0/16 {
+	# secret and password are mapped through the "secrets" file.
+	secret		= radius_password
+	shortname	= auth-test
+}
+# The following three fields are optional, but may be used by
+# checkrad.pl for simultaneous usage checks
+
+#	nas_type	= livingston
+#	login		= !root
+#	password	= someadminpas
+#}
+
+#######################################################################
+#
+#  Per-socket client lists.  The configuration entries are exactly
+#  the same as above, but they are nested inside of a section.
+#
+#  You can have as many per-socket client lists as you have "listen"
+#  sections, or you can re-use a list among multiple "listen" sections.
+#
+#  Un-comment this section, and edit a "listen" section to add:
+#  "clients = per_socket_clients".  That IP address/port combination
+#  will then accept ONLY the clients listed in this section.
+#
+#clients per_socket_clients {
+#	client 192.0.2.4 {
+#		secret = radius_password
+#	}
+#}
diff --git a/src/test/setup/radius-config/freeradius/clients.conf.orig b/src/test/setup/radius-config/freeradius/clients.conf.orig
new file mode 100644
index 0000000..f9ad206
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/clients.conf.orig
@@ -0,0 +1,314 @@
+# -*- text -*-
+##
+## clients.conf -- client configuration directives
+##
+##	$Id: 81f450102d9f1a3bc72264ab8d06543591fcab98 $
+
+#######################################################################
+#
+#  Define RADIUS clients (usually a NAS, Access Point, etc.).
+
+#
+#  Defines a RADIUS client.
+#
+#  '127.0.0.1' is another name for 'localhost'.  It is enabled by default,
+#  to allow testing of the server after an initial installation.  If you
+#  are not going to be permitting RADIUS queries from localhost, we suggest
+#  that you delete, or comment out, this entry.
+#
+#
+
+#
+#  Each client has a "short name" that is used to distinguish it from
+#  other clients.
+#
+#  In version 1.x, the string after the word "client" was the IP
+#  address of the client.  In 2.0, the IP address is configured via
+#  the "ipaddr" or "ipv6addr" fields.  For compatibility, the 1.x
+#  format is still accepted.
+#
+client 0.0.0.0/0 {
+	secret = testing123
+}
+client ipv6 {
+	ipv6addr = ::
+	secret = testing123
+}
+client localhost {
+	#  Allowed values are:
+	#	dotted quad (1.2.3.4)
+	#	   hostname	(radius.example.com)
+	ipaddr = 127.0.0.1
+
+	#  OR, you can use an IPv6 address, but not both
+	#  at the same time.
+#	ipv6addr = ::	# any.  ::1 == localhost
+
+	#
+	#  The transport protocol.
+	#
+	#  If unspecified, defaults to "udp", which is the traditional
+	#  RADIUS transport.  It may also be "tcp", in which case the
+	#  server will accept connections from this client ONLY over TCP.
+	#
+	proto = *
+
+	#
+	#  A note on DNS:  We STRONGLY recommend using IP addresses
+	#  rather than host names.  Using host names means that the
+	#  server will do DNS lookups when it starts, making it
+	#  dependent on DNS.  i.e. If anything goes wrong with DNS,
+	#  the server won't start!
+	#
+	#  The server also looks up the IP address from DNS once, and
+	#  only once, when it starts.  If the DNS record is later
+	#  updated, the server WILL NOT see that update.
+	#
+
+	#  One client definition can be applied to an entire network.
+	#  e.g. 127/8 should be defined with "ipaddr = 127.0.0.0" and
+	#  "netmask = 8"
+	#
+	#  If not specified, the default netmask is 32 (i.e. /32)
+	#
+	#  We do NOT recommend using anything other than 32.  There
+	#  are usually other, better ways to achieve the same goal.
+	#  Using netmasks of other than 32 can cause security issues.
+	#
+	#  You can specify overlapping networks (127/8 and 127.0/16)
+	#  In that case, the smallest possible network will be used
+	#  as the "best match" for the client.
+	#
+	#  Clients can also be defined dynamically at run time, based
+	#  on any criteria.  e.g. SQL lookups, keying off of NAS-Identifier,
+	#  etc.
+	#  See raddb/sites-available/dynamic-clients for details.
+	#
+
+#	netmask = 32
+
+	#
+	#  The shared secret used to "encrypt" and "sign" packets between
+	#  the NAS and FreeRADIUS.  You MUST change this secret from the
+	#  default, otherwise it's not a secret any more!
+	#
+	#  The secret can be any string, up to 8k characters in length.
+	#
+	#  Control codes can be entered via octal encoding,
+	#	e.g. "\101\102" == "AB"
+	#  Quotation marks can be entered by escaping them,
+	#	e.g. "foo\"bar"
+	#
+	#  A note on security:  The security of the RADIUS protocol
+	#  depends COMPLETELY on this secret!  We recommend using a
+	#  shared secret that is composed of:
+	#
+	#	upper case letters
+	#	lower case letters
+	#	numbers
+	#
+	#  And is at LEAST 8 characters long, preferably 16 characters in
+	#  length.  The secret MUST be random, and should not be words,
+	#  phrases, or anything else that is recognisable.
+	#
+	#  The default secret below is only for testing, and should
+	#  not be used in any real environment.
+	#
+	secret = testing123
+
+	#
+	#  Old-style clients do not send a Message-Authenticator
+	#  in an Access-Request.  RFC 5080 suggests that all clients
+	#  SHOULD include it in an Access-Request.  The configuration
+	#  item below allows the server to require it.  If a client
+	#  is required to include a Message-Authenticator and it does
+	#  not, then the packet will be silently discarded.
+	#
+	#  allowed values: yes, no
+	require_message_authenticator = no
+
+	#
+	#  The short name is used as an alias for the fully qualified
+	#  domain name, or the IP address.
+	#
+	#  It is accepted for compatibility with 1.x, but it is no
+	#  longer necessary in 2.0
+	#
+#	shortname = localhost
+
+	#
+	# the following three fields are optional, but may be used by
+	# checkrad.pl for simultaneous use checks
+	#
+
+	#
+	# The nas_type tells 'checkrad.pl' which NAS-specific method to
+	#  use to query the NAS for simultaneous use.
+	#
+	#  Permitted NAS types are:
+	#
+	#	cisco
+	#	computone
+	#	livingston
+	#	juniper
+	#	max40xx
+	#	multitech
+	#	netserver
+	#	pathras
+	#	patton
+	#	portslave
+	#	tc
+	#	usrhiper
+	#	other		# for all other types
+
+	#
+	nas_type	 = other	# localhost isn't usually a NAS...
+
+	#
+	#  The following two configurations are for future use.
+	#  The 'naspasswd' file is currently used to store the NAS
+	#  login name and password, which is used by checkrad.pl
+	#  when querying the NAS for simultaneous use.
+	#
+#	login	   = !root
+#	password	= someadminpas
+
+	#
+	#  As of 2.0, clients can also be tied to a virtual server.
+	#  This is done by setting the "virtual_server" configuration
+	#  item, as in the example below.
+	#
+#	virtual_server = home1
+
+	#
+	#  A pointer to the "home_server_pool" OR a "home_server"
+	#  section that contains the CoA configuration for this
+	#  client.  For an example of a coa home server or pool,
+	#  see raddb/sites-available/originate-coa
+#	coa_server = coa
+
+	#
+	#  Connection limiting for clients using "proto = tcp".
+	#
+	#  This section is ignored for clients sending UDP traffic
+	#
+	limit {
+		#
+		#  Limit the number of simultaneous TCP connections from a client
+		#
+		#  The default is 16.
+		#  Setting this to 0 means "no limit"
+		max_connections = 16
+
+		#  The per-socket "max_requests" option does not exist.
+
+		#
+		#  The lifetime, in seconds, of a TCP connection.  After
+		#  this lifetime, the connection will be closed.
+		#
+		#  Setting this to 0 means "forever".
+		lifetime = 0
+
+		#
+		#  The idle timeout, in seconds, of a TCP connection.
+		#  If no packets have been received over the connection for
+		#  this time, the connection will be closed.
+		#
+		#  Setting this to 0 means "no timeout".
+		#
+		#  We STRONGLY RECOMMEND that you set an idle timeout.
+		#
+		idle_timeout = 30
+	}
+}
+
+# IPv6 Client
+#client ::1 {
+#	secret		= 
+#	shortname	= localhost
+#}
+#
+# All IPv6 Site-local clients
+#client fe80::/16 {
+#	secret		= 
+#	shortname	= localhost
+#}
+
+#client some.host.org {
+#	secret		= 
+#	shortname	= localhost
+#}
+
+#
+#  You can now specify one secret for a network of clients.
+#  When a client request comes in, the BEST match is chosen.
+#  i.e. The entry from the smallest possible network.
+#
+#client 192.0.2.0/24 {
+#	secret		= -1
+#	shortname	= private-network-1
+#}
+#
+#client 198.51.100.0/24 {
+#	secret		= -2
+#	shortname	= private-network-2
+#}
+
+
+#client 203.0.113.1 {
+#	# secret and password are mapped through the "secrets" file.
+#	secret		= 
+#	shortname	= liv1
+#}
+
+client 172.17.0.0/16 {
+	# secret and password are mapped through the "secrets" file.
+	secret		= testing123 
+	shortname	= auth-test
+}
+# The following three fields are optional, but may be used by
+# checkrad.pl for simultaneous usage checks
+
+#	nas_type	= livingston
+#	login		= !root
+#	password	= someadminpas
+#}
+
+#######################################################################
+#
+#  Per-socket client lists.  The configuration entries are exactly
+#  the same as above, but they are nested inside of a section.
+#
+#  You can have as many per-socket client lists as you have "listen"
+#  sections, or you can re-use a list among multiple "listen" sections.
+#
+#  Un-comment this section, and edit a "listen" section to add:
+#  "clients = per_socket_clients".  That IP address/port combination
+#  will then accept ONLY the clients listed in this section.
+#
+#clients per_socket_clients {
+#	client 192.0.2.4 {
+#		secret = 
+#	}
+#}
diff --git a/src/test/setup/radius-config/freeradius/dictionary b/src/test/setup/radius-config/freeradius/dictionary
new file mode 100644
index 0000000..1f7dc90
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/dictionary
@@ -0,0 +1,49 @@
+#
+#	This is the local dictionary file which can be
+#	edited by local administrators.  It will be loaded
+#	AFTER the main dictionary files are loaded.
+#
+#	As of version 3.0.2, FreeRADIUS will automatically
+#	load the main dictionary files from
+#
+#		${prefix}/share/freeradius/dictionary
+#
+#	It is no longer necessary for this file to $INCLUDE
+#	the main dictionaries.  However, if the $INCLUDE
+#	line is here, nothing bad will happen.
+#
+#	Any new/changed attributes MUST be placed in this file.
+#	The pre-defined dictionaries SHOULD NOT be edited.
+#
+#	See "man dictionary" for documentation on its format.
+#
+#	$Id: eed5d70f41b314f9ed3f006a22d9f9a2be2c9516 $
+#
+
+#
+#	All local attributes and $INCLUDE's should go into
+#	this file.
+#
+
+#	If you want to add entries to the dictionary file,
+#	which are NOT going to be placed in a RADIUS packet,
+#	add them to the 'dictionary.local' file.
+#
+#	The numbers you pick should be between 3000 and 4000.
+#	These attributes will NOT go into a RADIUS packet.
+#
+#	If you want that, you will need to use VSAs.  This means
+#	requesting allocation of a Private Enterprise Code from
+#	http://iana.org.  We STRONGLY suggest doing that only if
+#	you are a vendor of RADIUS equipment.
+#
+#	See RFC 6158 for more details.
+#	http://ietf.org/rfc/rfc6158.txt
+#
+
+#
+#	These attributes are examples
+#
+#ATTRIBUTE	My-Local-String		3000	string
+#ATTRIBUTE	My-Local-IPAddr		3001	ipaddr
+#ATTRIBUTE	My-Local-Integer	3002	integer
diff --git a/src/test/setup/radius-config/freeradius/experimental.conf b/src/test/setup/radius-config/freeradius/experimental.conf
new file mode 100644
index 0000000..e5395f3
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/experimental.conf
@@ -0,0 +1,116 @@
+#
+#  This file contains the configuration for experimental modules.
+#
+#  By default, it is NOT included in the build.
+#
+#  $Id: 87d9744a4f0fa7b9b06b4908ddd6b7d2f1a7fd62 $
+#
+
+# Configuration for the Python module.
+#
+# Where radiusd is a Python module, radiusd.py, and the
+# function 'authorize' is called.  Here is a dummy piece
+# of code:
+#
+#	def authorize(params):
+#		print params
+#		return (5, ('Reply-Message', 'banned'))
+#
+# The RADIUS value-pairs are passed as a tuple of tuple
+# pairs as the first argument, e.g. (('attribute1',
+# 'value1'), ('attribute2', 'value2'))
+#
+# The function return is a tuple with the first element
+# being the return value of the function.
+# The 5 corresponds to RLM_MODULE_USERLOCK. I plan to
+# write the return values as Python symbols to avoid
+# confusion.
+#
+# The remaining tuple members are the string form of
+# value-pairs which are passed on to pairmake().
+#
+python {
+	mod_instantiate = radiusd_test
+	func_instantiate = instantiate
+
+	mod_authorize = radiusd_test
+	func_authorize = authorize
+
+	mod_accounting = radiusd_test
+	func_accounting = accounting
+
+	mod_pre_proxy = radiusd_test
+	func_pre_proxy = pre_proxy
+
+	mod_post_proxy = radiusd_test
+	func_post_proxy = post_proxy
+
+	mod_post_auth = radiusd_test
+	func_post_auth = post_auth
+
+	mod_recv_coa = radiusd_test
+	func_recv_coa = recv_coa
+
+	mod_send_coa = radiusd_test
+	func_send_coa = send_coa
+
+	mod_detach = radiusd_test
+	func_detach = detach
+}
+
+
+# Configuration for the example module.  Uncommenting it will cause it
+# to get loaded and initialised, but should have no real effect as long
+# it is not referenced in one of the autz/auth/preacct/acct sections
+example {
+	#  Boolean variable.
+	# allowed values: {no, yes}
+	boolean = yes
+
+	#  An integer, of any value.
+	integer = 16
+
+	#  A string.
+	string = "This is an example configuration string"
+
+	# An IP address, either in dotted quad (1.2.3.4) or hostname
+	# (example.com)
+	ipaddr = 127.0.0.1
+
+	# A subsection
+	mysubsection {
+		anotherinteger = 1000
+		# They nest
+		deeply nested {
+			string = "This is a different string"
+		}
+	}
+}
+
+#
+#  To create a dbm users file, do:
+#
+#   cat test.users | rlm_dbm_parser -f /etc/raddb/users_db
+#
+#  Then add 'dbm' in 'authorize' section.
+#
+#  Note that even if the file has a ".db" or ".dbm" extension,
+#  you may have to specify it here without that extension.  This
+#  is because the DBM libraries "helpfully" add a ".db" to the
+#  filename, but don't check if it's already there.
+#
+dbm {
+	usersfile = ${confdir}/users_db
+}
+
+# Instantiate a couple instances of the idn module
+idn {
+}
+
+# ...more commonly known as...
+idn idna {
+}
+
+idn idna_lenient {
+	UseSTD3ASCIIRules = no
+}
diff --git a/src/test/setup/radius-config/freeradius/hints b/src/test/setup/radius-config/freeradius/hints
new file mode 120000
index 0000000..f45fc9e
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/hints
@@ -0,0 +1 @@
+mods-config/preprocess/hints
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/huntgroups b/src/test/setup/radius-config/freeradius/huntgroups
new file mode 120000
index 0000000..c2d27ff
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/huntgroups
@@ -0,0 +1 @@
+mods-config/preprocess/huntgroups
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-available/README.rst b/src/test/setup/radius-config/freeradius/mods-available/README.rst
new file mode 100644
index 0000000..8ffb764
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/README.rst
@@ -0,0 +1,79 @@
+Modules in Version 3
+====================
+
+As of Version 3, all of the modules have been placed in the
+"mods-available/" directory.  This practice follows that used by other
+servers such as Nginx, Apache, etc.  The "modules" directory should
+not be used.
+
+Modules are enabled by creating a file in the mods-enabled/ directory.
+You can also create a soft-link from one directory to another::
+
+  $ cd raddb/mods-enabled
+  $ ln -s ../mods-available/foo
+
+This will enable module "foo".  Be sure that you have configured the
+module correctly before enabling it, otherwise the server will not
+start.  You can verify the server configuration by running
+"radiusd -XC".
+
+A large number of modules are enabled by default.  This allows the
+server to work with the largest number of authentication protocols.
+Please be careful when disabling modules.  You will likely need to
+edit the "sites-enabled/" files to remove references to any disabled
+modules.
+
+Conditional Modules
+-------------------
+
+Version 3 allows modules to be conditionally loaded.  This is useful
+when you want to have a virtual server which references a module, but
+does not require it.  Instead of editing the virtual server file, you
+can just conditionally enable the module.
+
+Modules are conditionally enabled by adding a "-" before their name in
+a virtual server.  For example, you can do::
+
+  server {
+    authorize {
+      ...
+      ldap
+      -sql
+      ...
+    }
+  }
+
+This says "require the LDAP module, but use the SQL module only if it
+is configured."
+
+This feature is not very useful for production configurations.  It is,
+however, very useful for the default examples that ship with the
+server.
+
+Ignoring module
+---------------
+
+If you see this message::
+
+  Ignoring module (see raddb/mods-available/README.rst)
+
+Then you are in the right place.  Most of the time this message can be
+ignored.  The message can be fixed by finding the references to "-module"
+in the virtual server, and deleting them.
+
+Another way to fix it is to configure the module, as described above.
+
+Simplification
+--------------
+
+Allowing conditional modules simplifies the default virtual servers
+that are shipped with FreeRADIUS.  This means that if you want to
+enable LDAP (for example), you no longer need to edit the files in
+raddb/sites-available/ in order to enable it.
+
+Instead, you should edit the raddb/mods-available/ldap file to point
+to your local LDAP server.  Then, enable the module via the soft-link
+method described above.
+
+Once the module is enabled, it will automatically be used in the
+default configuration.
diff --git a/src/test/setup/radius-config/freeradius/mods-available/always b/src/test/setup/radius-config/freeradius/mods-available/always
new file mode 100644
index 0000000..bba5b79
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/always
@@ -0,0 +1,61 @@
+# -*- text -*-
+#
+#  $Id: de3f13089d8951f4c822ebc4007df58e0487de14 $
+
+#
+#  The "always" module is here for debugging purposes, or
+#  for use in complex policies.
+#  Each instance simply returns the same result, always, without
+#  doing anything.
+#
+#  rcode may be one of the following values:
+#  - reject   - Reject the user.
+#  - fail     - Simulate or indicate a failure.
+#  - ok       - Simulate or indicate a success.
+#  - handled  - Indicate that the request has been handled,
+#               stop processing, and send response if set.
+#  - invalid  - Indicate that the request is invalid.
+#  - userlock - Indicate that the user account has been
+#               locked out.
+#  - notfound - Indicate that a user account can't be found.
+#  - noop     - Simulate a no-op.
+#  - updated  - Indicate that the request has been updated.
+#
+#  If an instance is listed in a session {}  section, 
+#  this simulates a user having <integer> sessions.
+#  
+#  simulcount = <integer>
+#
+#  If an instance is listed in a session {}  section, 
+#  this simulates the user having multilink
+#  sessions.
+#
+#  mpp = <integer>
+#
+always reject {
+	rcode = reject
+}
+always fail {
+	rcode = fail
+}
+always ok {
+	rcode = ok
+}
+always handled {
+	rcode = handled
+}
+always invalid {
+	rcode = invalid
+}
+always userlock {
+	rcode = userlock
+}
+always notfound {
+	rcode = notfound
+}
+always noop {
+	rcode = noop
+}
+always updated {
+	rcode = updated
+}
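+
+#
+#  For example, a hypothetical instance using the simulcount option
+#  described above; listed in a session {} section, it would simulate
+#  the user having three active sessions:
+#
+#always simulate_three {
+#	rcode = ok
+#	simulcount = 3
+#}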
diff --git a/src/test/setup/radius-config/freeradius/mods-available/attr_filter b/src/test/setup/radius-config/freeradius/mods-available/attr_filter
new file mode 100644
index 0000000..360e230
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/attr_filter
@@ -0,0 +1,50 @@
+# -*- text -*-
+#
+#  $Id: 1caff077b2429c948a04777fcd619be901ac83dc $
+
+#
+#  This file defines a number of instances of the "attr_filter" module.
+#
+
+# attr_filter - filters the attributes received in replies from
+# proxied servers, to make sure we send back to our RADIUS client
+# only allowed attributes.
+attr_filter attr_filter.post-proxy {
+	key = "%{Realm}"
+	filename = ${modconfdir}/${.:name}/post-proxy
+}
+
+# attr_filter - filters the attributes in the packets we send to
+# the RADIUS home servers.
+attr_filter attr_filter.pre-proxy {
+	key = "%{Realm}"
+	filename = ${modconfdir}/${.:name}/pre-proxy
+}
+
+# Enforce RFC requirements on the contents of Access-Reject
+# packets.  See the comments at the top of the file for
+# more details.
+#
+attr_filter attr_filter.access_reject {
+	key = "%{User-Name}"
+	filename = ${modconfdir}/${.:name}/access_reject
+}
+
+# Enforce RFC requirements on the contents of Access-Challenge
+# packets.  See the comments at the top of the file for
+# more details.
+#
+attr_filter attr_filter.access_challenge {
+	key = "%{User-Name}"
+	filename = ${modconfdir}/${.:name}/access_challenge
+}
+
+
+#  Enforce RFC requirements on the contents of the
+#  Accounting-Response packets.  See the comments at the
+#  top of the file for more details.
+#
+attr_filter attr_filter.accounting_response {
+	key = "%{User-Name}"
+	filename = ${modconfdir}/${.:name}/accounting_response
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/cache b/src/test/setup/radius-config/freeradius/mods-available/cache
new file mode 100644
index 0000000..e679a1f
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/cache
@@ -0,0 +1,98 @@
+# -*- text -*-
+#
+#  $Id: 53f2169741ce8c7f78eb525ddc5a6fddf1dcc0cc $
+
+#
+#	A module to cache attributes.  The idea is that you can look
+#	up information in a database, and then cache it.  Repeated
+#	requests for the same information will then have the cached
+#	values added to the request.
+#
+#	The module can cache a fixed set of attributes per key.
+#	It can be listed in "authorize", "post-auth", "pre-proxy"
+#	and "post-proxy".
+#
+#	If you want different things cached for authorize and post-auth,
+#	you will need to define two instances of the "cache" module.
+#
+#	The module returns "ok" if it found a cache entry.
+#	The module returns "updated" if it added a new cache entry.
+#	The module returns "noop" if it did nothing.
+#
+cache {
+	#  The key used to index the cache.  It is dynamically expanded
+	#  at run time.
+	key = "%{User-Name}"
+
+	#  The TTL of cache entries, in seconds.  Entries older than this
+	#  will be expired.
+	#
+	#  You can set the TTL per cache entry by adding a control
+	#  variable "Cache-TTL".  The value there will over-ride this one.
+	#  Setting a Cache-TTL of 0 means "delete this entry".
+	#
+	#  This value should be between 10 and 86400.
+	ttl = 10
+
+	#  You can flush the cache via
+	#
+	#	radmin -e "set module config cache epoch 123456789"
+	#
+	#  Where the last value is a 32-bit Unix timestamp.  Cache entries
+	#  older than this are expired, and new entries added.
+	#
+	#  You should never set the "epoch" configuration item in
+	#  this file.
+
+	#  The module can also operate in status-only mode where it will
+	#  not add new cache entries, or merge existing ones.
+	#
+	#  To enable set the control attribute "Cache-Status-Only" to "yes"
+	#  The module will return "ok" if it found a cache entry.
+	#  The module will return "notfound" if it failed to find a cache entry,
+	#  or the entry had expired.
+	#
+	#  Note: expired entries will still be removed.
+
+	#  If yes the following attributes will be added to the request list:
+	#  	* Cache-Entry-Hits - The number of times this entry has been
+	#			     retrieved.
+	add_stats = no
+
+	#  The list of attributes to cache for a particular key.
+	#  Each key gets the same set of cached attributes.
+	#  The attributes are dynamically expanded at run time.
+	#
+	#  You can specify which list the attribute goes into by
+	#  prefixing the attribute name with the list.  This allows
+	#  you to update multiple lists with one configuration.
+	#
+	#  If no list is specified the default list will be updated.
+	#
+	#  The default list is specified in the same way as unlang update
+	#  stanzas. If no default list is set, it will default to the
+	#  request list.
+	#
+	#  Quoting around values determines how they're processed:
+	#  - double quoted values are xlat expanded.
+	#  - single quoted values are treated as literals.
+	#  - bare values are treated as attribute references.
+	#
+	#  The '+=' operator causes all instances of the reference to
+	#  be cached.
+	#
+	#  Attributes that are generated from processing the update section
+	#  are also added to the current request, as if there'd been a cache
+	#  hit.
+	update {
+		# [outer.]<list>:<attribute> <op> <value>
+
+		# Cache all instances of Reply-Message in the reply list
+		reply:Reply-Message += &reply:Reply-Message
+
+		# Add our own to show when the cache was last updated
+		reply:Reply-Message += "Cache last updated at %t"
+
+		reply:Class := "%{randstr:ssssssssssssssssssssssssssssssss}"
+	}
+}
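+
+#
+#  Illustrative policy sketch (hypothetical unlang, not part of this
+#  module's defaults): query the cache in status-only mode by setting
+#  the "Cache-Status-Only" control attribute described above.
+#
+#	authorize {
+#		update control {
+#			Cache-Status-Only := "yes"
+#		}
+#		cache
+#	}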
diff --git a/src/test/setup/radius-config/freeradius/mods-available/cache_eap b/src/test/setup/radius-config/freeradius/mods-available/cache_eap
new file mode 100644
index 0000000..e9a3aed
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/cache_eap
@@ -0,0 +1,13 @@
+#
+#	Cache EAP responses for resiliency on intermediary proxy fail-over
+#
+cache cache_eap {
+	key = "%{%{control:State}:-%{%{reply:State}:-%{State}}}"
+
+	ttl = 15
+
+	update reply {
+		reply: += &reply:
+		control:State := &request:State
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/chap b/src/test/setup/radius-config/freeradius/mods-available/chap
new file mode 100644
index 0000000..97d965b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/chap
@@ -0,0 +1,11 @@
+# -*- text -*-
+#
+#  $Id: e2a3cd3b110ffffdbcff86c7fc65a9275ddc3379 $
+
+# CHAP module
+#
+#  To authenticate requests containing a CHAP-Password attribute.
+#
+chap {
+	# no configuration
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/counter b/src/test/setup/radius-config/freeradius/mods-available/counter
new file mode 100644
index 0000000..54a1e00
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/counter
@@ -0,0 +1,82 @@
+# -*- text -*-
+#
+#  $Id: a5ac1e60ef117a2c59ace1a9d061d8f70d1da538 $
+
+#  counter module:
+#  This module takes an attribute (count-attribute).
+#  It also takes a key, and creates a counter for each unique
+#  key.  The count is incremented when accounting packets are
+#  received by the server.  The value of the increment depends
+#  on the attribute type.
+#  If the attribute is Acct-Session-Time or of an integer type we add
+#  the value of the attribute. If it is anything else we increase the
+#  counter by one.
+#
+#  The 'reset' parameter defines when the counters are all reset to
+#  zero.  It can be hourly, daily, weekly, monthly or never.
+#
+#  hourly: Reset on 00:00 of every hour
+#  daily: Reset on 00:00:00 every day
+#  weekly: Reset on 00:00:00 on sunday
+#  monthly: Reset on 00:00:00 of the first day of each month
+#
+#  It can also be user defined. It should be of the form:
+#  num[hdwm] where:
+#  h: hours, d: days, w: weeks, m: months
+#  If the letter is omitted, days will be assumed. For example:
+#  reset = 10h (reset every 10 hours)
+#  reset = 12  (reset every 12 days)
+#
+#
+#  The check_name attribute defines an attribute which will be
+#  registered by the counter module and can be used to set the
+#  maximum allowed value for the counter after which the user
+#  is rejected.
+#  Something like:
+#
+#  DEFAULT Max-Daily-Session := 36000
+#          Fall-Through = 1
+#
+#  You should add the counter module in the instantiate
+#  section so that it registers check_name before the files
+#  module reads the users file.
+#
+#  If check_name is set and the user is to be rejected then we
+#  send back a Reply-Message and we log a Failure-Message in
+#  the radius.log
+#
+#  If the count attribute is Acct-Session-Time then on each
+#  login we send back the remaining online time as a
+#  Session-Timeout attribute ELSE and if the reply_name is
+#  set, we send back that attribute.  The reply_name attribute
+#  MUST be of an integer type.
+#
+#  The counter-name can also be used instead of using the check_name
+#  like below:
+#
+#  DEFAULT  Daily-Session-Time > 3600, Auth-Type = Reject
+#      Reply-Message = "You've used up more than one hour today"
+#
+#  The allowed_service_type attribute can be used to only take
+#  into account specific sessions. For example if a user first
+#  logs in through a login menu and then selects ppp there will
+#  be two sessions. One for Login-User and one for Framed-User
+#  service type. We only need to take into account the second one.
+#
+#  The module should be added in the instantiate, authorize and
+#  accounting sections.  Make sure that in the authorize
+#  section it comes after any module which sets the
+#  'check_name' attribute.
+#
+counter daily {
+	filename = ${db_dir}/db.daily
+	key = User-Name
+	count_attribute = Acct-Session-Time
+	reset = daily
+	counter_name = Daily-Session-Time
+	check_name = Max-Daily-Session
+	reply_name = Session-Timeout
+	allowed_service_type = Framed-User
+	cache_size = 5000
+}
+
diff --git a/src/test/setup/radius-config/freeradius/mods-available/cui b/src/test/setup/radius-config/freeradius/mods-available/cui
new file mode 100644
index 0000000..cb6fe5d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/cui
@@ -0,0 +1,53 @@
+# -*- text -*-
+#
+#  $Id: b72aa309bfc05c2443e4bb2db061b8f33de8e359 $
+
+#
+#  Write Chargeable-User-Identity to the database.
+#
+#  Schema	raddb/sql/cui/<DB>/schema.sql
+#  Queries	raddb/sql/cui/<DB>/queries.conf
+#
+sql cuisql {
+
+	# The dialect of SQL you want to use, this should usually match
+	# the driver below.
+	#
+	# If you're using rlm_sql_null, then it should be the type of
+	# database the logged queries are going to be executed against.
+	dialect = "sqlite"
+
+	# The sub-module to use to execute queries. This should match
+	# the database you're attempting to connect to.
+	#
+	# There are CUI queries available for:
+	#    * rlm_sql_mysql
+	#    * rlm_sql_postgresql
+	#    * rlm_sql_sqlite
+	#    * rlm_sql_null (log queries to disk)
+	#
+	driver = "rlm_sql_${dialect}"
+
+	sqlite {
+		filename = ${radacctdir}/cui.sqlite
+		bootstrap = ${modconfdir}/${..:name}/cui/sqlite/schema.sql
+	}
+
+	# Write CUI queries to a logfile. Useful for debugging.
+#	logfile = ${logdir}/cuilog.sql
+
+	pool {
+		start = 5
+		min = 4
+		max = 10
+		spare = 3
+		uses = 0
+		lifetime = 0
+		idle_timeout = 60
+	}
+
+	cui_table = "cui"
+	sql_user_name = "%{User-Name}"
+
+	$INCLUDE ${modconfdir}/${.:name}/cui/${dialect}/queries.conf
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/date b/src/test/setup/radius-config/freeradius/mods-available/date
new file mode 100644
index 0000000..bd4737d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/date
@@ -0,0 +1,14 @@
+#
+#  Registers xlat to convert between time formats.
+#
+#  xlat input string is an attribute name. If this attribute is of date
+#  or integer type, the date xlat will convert it to a time string in
+#  the format of the format config item.
+#
+#  If the attribute is a string type, date will attempt to parse it in
+#  the format specified by the format config item, and will expand
+#  to a Unix timestamp.
+#
+date {
+	format = "%b %e %Y %H:%M:%S %Z"
+}
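+
+#  Illustrative usage (hypothetical unlang snippet): expand a date or
+#  integer attribute, such as Event-Timestamp, into the format above.
+#
+#	update reply {
+#		Reply-Message := "Request received at %{date:Event-Timestamp}"
+#	}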
diff --git a/src/test/setup/radius-config/freeradius/mods-available/detail b/src/test/setup/radius-config/freeradius/mods-available/detail
new file mode 100644
index 0000000..e3cc38a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/detail
@@ -0,0 +1,93 @@
+# -*- text -*-
+#
+#  $Id: f235eb9a0ab4de42f773f3aea3810d1dcde99bd1 $
+
+# Write a detailed log of all accounting records received.
+#
+detail {
+	#  Note that we do NOT use NAS-IP-Address here, as
+	#  that attribute MAY BE from the originating NAS, and
+	#  NOT from the proxy which actually sent us the
+	#  request.
+	#
+	#  The following line creates a new detail file for
+	#  every radius client (by IP address or hostname).
+	#  In addition, a new detail file is created every
+	#  day, so that the detail file doesn't have to go
+	#  through a 'log rotation'
+	#
+	#  If your detail files are large, you may also want
+	#  to add a ':%H' (see doc/variables.txt) to the end
+	#  of it, to create a new detail file every hour, e.g.:
+	#
+	#   ..../detail-%Y%m%d:%H
+	#
+	#  This will create a new detail file for every hour.
+	#
+	#  If you are reading detail files via the "listen" section
+	#  (e.g. as in raddb/sites-available/robust-proxy-accounting),
+	#  you MUST use a unique directory for each combination of a
+	#  detail file writer, and reader.  That is, there can only
+	#  be ONE "listen" section reading detail files from a
+	#  particular directory.
+	#
+	filename = ${radacctdir}/%{%{Packet-Src-IP-Address}:-%{Packet-Src-IPv6-Address}}/detail-%Y%m%d
+
+	#
+	#  If you are using radrelay, delete the above line for "file",
+	#  and use this one instead:
+	#
+#	filename = ${radacctdir}/detail
+
+	#
+	#  The Unix-style permissions on the 'detail' file.
+	#
+	#  The detail file often contains secret or private
+	#  information about users.  So by keeping the file
+	#  permissions restrictive, we can prevent unwanted
+	#  people from seeing that information.
+	permissions = 0600
+
+	# The Unix group of the log file.
+	#
+	# The user that the server runs as must be in the specified
+	# system group otherwise this will fail to work.
+	#
+#	group = ${security.group}
+
+	#
+	#  Every entry in the detail file has a header which
+	#  is a timestamp.  By default, we use the ctime
+	#  format (see "man ctime" for details).
+	#
+	#  The header can be customised by editing this
+	#  string.  See "doc/variables.txt" for a description
+	#  of what can be put here.
+	#
+	header = "%t"
+
+	#
+	#  Uncomment this line if the detail file reader will be
+	#  reading this detail file.
+	#
+#	locking = yes
+
+	#
+	#  Log the Packet src/dst IP/port.  This is disabled by
+	#  default, as that information isn't used by many people.
+	#
+#	log_packet_header = yes
+
+	#
+	# Certain attributes such as User-Password may be
+	# "sensitive", so they should not be printed in the
+	# detail file.  This section lists the attributes
+	# that should be suppressed.
+	#
+	# The attributes should be listed one to a line.
+	#
+	#suppress {
+		# User-Password
+	#}
+
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/detail.example.com b/src/test/setup/radius-config/freeradius/mods-available/detail.example.com
new file mode 100644
index 0000000..745e1f1
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/detail.example.com
@@ -0,0 +1,27 @@
+# -*- text -*-
+#
+#  Detail file writer, used in the following examples:
+#
+#	raddb/sites-available/robust-proxy-accounting
+#	raddb/sites-available/decoupled-accounting
+#
+#  Note that this module can write detail files that are read by
+#  only ONE "listen" section.  If you use BOTH of the examples
+#  above, you will need to define TWO "detail" modules.
+#
+#  e.g. detail1.example.com && detail2.example.com
+#
+#
+#  We write *multiple* detail files here.  They will be processed by
+#  the detail "listen" section in the order that they were created.
+#  The directory containing these files should NOT be used for any
+#  other purposes.  i.e. It should have NO other files in it.
+#
+#  Writing multiple detail files enables the server to process the pieces
+#  in smaller chunks.  This helps in certain catastrophic corner cases.
+#
+#  $Id: 827cdf57e70dc2ff2252016194f4bb846eecead2 $
+#
+detail detail.example.com {
+	filename = ${radacctdir}/detail.example.com/detail-%Y%m%d:%H:%G
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/dhcp b/src/test/setup/radius-config/freeradius/mods-available/dhcp
new file mode 100644
index 0000000..d4e9c85
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/dhcp
@@ -0,0 +1,19 @@
+# -*- text -*-
+#
+#  $Id: a4316335d7f73b37ec5aa9278de91d37dd28eddc $
+
+#
+#  This module is useful only for 'xlat'.  To use it,
+#  put 'dhcp' into the 'instantiate' section.
+#
+#  %{dhcp_options:<Attribute-ref>} may be used to decode
+#  DHCP options data included in RADIUS packets by vendors
+#  of DHCP to RADIUS gateways.
+#
+#  This is known to work with the following VSAs:
+#	* Juniper		- ERX-Dhcp-Options
+#	* Alcatel lucent SR	- Alc-ToServer-Dhcp-Options
+#				- Alc-ToClient-Dhcp-Options
+#
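+#  As an illustrative sketch (the attributes below are examples only),
+#  once 'dhcp' is listed in the 'instantiate' section, the xlat can be
+#  called from unlang, e.g.:
+#
+#	update request {
+#		Tmp-String-0 := "%{dhcp_options:ERX-Dhcp-Options}"
+#	}
+#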
+dhcp {
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/dhcp_sqlippool b/src/test/setup/radius-config/freeradius/mods-available/dhcp_sqlippool
new file mode 100644
index 0000000..ed04d73
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/dhcp_sqlippool
@@ -0,0 +1,36 @@
+##  Configuration for DHCP to use SQL IP Pools.
+##
+##  See sqlippool.conf for common configuration explanation
+##
+##  $Id: 8d459c06a16b77eff7b976e32838dbc1195d901f $
+
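+##  As a sketch of how this instance might be referenced (assuming a
+##  DHCP virtual server along the lines of sites-available/dhcp), the
+##  module is listed where an address should be allocated, e.g.:
+##
+##	dhcp DHCP-Discover {
+##		dhcp_sqlippool
+##		ok
+##	}
+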
+sqlippool dhcp_sqlippool {
+	sql_instance_name = "sql"
+
+	ippool_table = "radippool"
+
+	lease_duration = 7200
+
+	# Client's MAC address is mapped to Calling-Station-Id in policy.conf
+	pool_key = "%{Calling-Station-Id}"
+
+	# For now, it works with MySQL.
+	$INCLUDE ${modconfdir}/sql/ippool-dhcp/mysql/queries.conf
+
+	# It may also work with sqlite - this is very experimental.
+	# Comment out the above line and add the following include.
+	# To use sqlite you need to add '%' to safe_characters in
+	# raddb/mods-config/sql/main/sqlite/queries.conf.
+	# $INCLUDE ${modconfdir}/sql/ippool-dhcp/sqlite/queries.conf
+
+	sqlippool_log_exists = "DHCP: Existing IP: %{reply:Framed-IP-Address} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
+
+	sqlippool_log_success = "DHCP: Allocated IP: %{reply:Framed-IP-Address} from %{control:Pool-Name} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
+
+	sqlippool_log_clear = "DHCP: Released IP %{Framed-IP-Address} (did %{Called-Station-Id} cli %{Calling-Station-Id} user %{User-Name})"
+
+	sqlippool_log_failed = "DHCP: IP Allocation FAILED from %{control:Pool-Name} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
+
+	sqlippool_log_nopool = "DHCP: No Pool-Name defined (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
+
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/digest b/src/test/setup/radius-config/freeradius/mods-available/digest
new file mode 100644
index 0000000..af52017
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/digest
@@ -0,0 +1,13 @@
+# -*- text -*-
+#
+#  $Id: f0aa9edf9da33d63fe03e7d1ed3cbca848eec54d $
+
+#
+#  The 'digest' module currently has no configuration.
+#
+#  "Digest" authentication against a Cisco SIP server.
+#  See 'doc/rfc/draft-sterman-aaa-sip-00.txt' for details
+#  on performing digest authentication for Cisco SIP servers.
+#
+digest {
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/dynamic_clients b/src/test/setup/radius-config/freeradius/mods-available/dynamic_clients
new file mode 100644
index 0000000..c5c9c8a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/dynamic_clients
@@ -0,0 +1,32 @@
+# -*- text -*-
+#
+#  $Id: cc2bd5fd22aa473b98af5dde3fac7a66e39a9e9d $
+
+# This module loads RADIUS clients as needed, rather than when the server
+# starts.
+#
+#  There are no configuration entries for this module.  Instead, it
+#  relies on the "client" configuration.  You must:
+#
+#	1) link raddb/sites-enabled/dynamic_clients to
+#	   raddb/sites-available/dynamic_clients
+#
+#	2) Define a client network/mask (see top of the above file)
+#
+#	3) uncomment the "directory" entry in that client definition
+#
+#	4) list "dynamic_clients" in the "authorize" section of the
+#	   "dynamic_clients" virtual server.  The default example already
+#	   does this.
+#
+#	5) put files into the above directory, one per IP.
+#	   e.g. file "192.0.2.1" should contain a normal client definition
+#	   for a client with IP address 192.0.2.1 (see the sketch below).
+#
+#  For more documentation, see the file:
+#
+#	raddb/sites-available/dynamic-clients
+#
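+#  As an example sketch of such a per-IP file (the secret and shortname
+#  values are placeholders, not part of any default configuration), the
+#  file "192.0.2.1" could contain:
+#
+#	client 192.0.2.1 {
+#		secret    = testing123
+#		shortname = example-nas
+#	}
+#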
+dynamic_clients {
+
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/eap b/src/test/setup/radius-config/freeradius/mods-available/eap
new file mode 100644
index 0000000..800c50b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/eap
@@ -0,0 +1,785 @@
+# -*- text -*-
+##
+##  eap.conf -- Configuration for EAP types (PEAP, TTLS, etc.)
+##
+##	$Id: 0fffa886244eb9cfce13103d551b7a30f6538802 $
+
+#######################################################################
+#
+#  Whatever you do, do NOT set 'Auth-Type := EAP'.  The server
+#  is smart enough to figure this out on its own.  The most
+#  common side effect of setting 'Auth-Type := EAP' is that the
+#  users then cannot use ANY other authentication method.
+#
+#  EAP types NOT listed here may be supported via the "eap2" module.
+#  See experimental.conf for documentation.
+#
+eap {
+	#  Invoke the default supported EAP type when
+	#  EAP-Identity response is received.
+	#
+	#  The incoming EAP messages DO NOT specify which EAP
+	#  type they will be using, so it MUST be set here.
+	#
+	#  For now, only one default EAP type may be used at a time.
+	#
+	#  If the EAP-Type attribute is set by another module,
+	#  then that EAP type takes precedence over the
+	#  default type configured here.
+	#
+	default_eap_type = TLS
+	#  A list is maintained to correlate EAP-Response
+	#  packets with EAP-Request packets.  After a
+	#  configurable length of time, entries in the list
+	#  expire, and are deleted.
+	#
+	timer_expire     = 60
+
+	#  There are many EAP types, but the server has support
+	#  for only a limited subset.  If the server receives
+	#  a request for an EAP type it does not support, then
+	#  it normally rejects the request.  By setting this
+	#  configuration to "yes", you can tell the server to
+	#  instead keep processing the request.  Another module
+	#  MUST then be configured to proxy the request to
+	#  another RADIUS server which supports that EAP type.
+	#
+	#  If another module is NOT configured to handle the
+	#  request, then the request will still end up being
+	#  rejected.
+	ignore_unknown_eap_types = no
+
+	# Cisco AP1230B firmware 12.2(13)JA1 has a bug.  When given
+	# a User-Name attribute in an Access-Accept, it copies one
+	# more byte than it should.
+	#
+	# We can work around it by configurably adding an extra
+	# zero byte.
+	cisco_accounting_username_bug = no
+
+	#
+	#  Help prevent DoS attacks by limiting the number of
+	#  sessions that the server is tracking.  For simplicity,
+	#  this is taken from the "max_requests" directive in
+	#  radiusd.conf.
+	max_sessions = ${max_requests}
+
+	# Supported EAP-types
+
+	#
+	#  We do NOT recommend using EAP-MD5 authentication
+	#  for wireless connections.  It is insecure, and does
+	#  not provide for dynamic WEP keys.  EAP-MSCHAPv2 is
+	#  enabled here instead.
+	#
+	mschapv2 {
+	}
+
+	#
+	# EAP-pwd -- secure password-based authentication
+	#
+#	pwd {
+#		group = 19
+
+		#
+#		server_id = theserver@example.com
+
+		#  This has the same meaning as for TLS.
+#		fragment_size = 1020
+
+		# The virtual server which determines the
+		# "known good" password for the user.
+		# Note that unlike TLS, only the "authorize"
+		# section is processed.  EAP-PWD requests can be
+		# distinguished by having a User-Name, but
+		# no User-Password, CHAP-Password, EAP-Message, etc.
+#		virtual_server = "inner-tunnel"
+#	}
+
+	# Cisco LEAP
+	#
+	#  We do not recommend using LEAP in new deployments.  See:
+	#  http://www.securiteam.com/tools/5TP012ACKE.html
+	#
+	#  Cisco LEAP uses the MS-CHAP algorithm (but not
+	#  the MS-CHAP attributes) to perform its authentication.
+	#
+	#  As a result, LEAP *requires* access to the plain-text
+	#  User-Password, or the NT-Password attributes.
+	#  'System' authentication is impossible with LEAP.
+	#
+	leap {
+	}
+
+	#  Generic Token Card.
+	#
+	#  Currently, this is only permitted inside of EAP-TTLS,
+	#  or EAP-PEAP.  The module "challenges" the user with
+	#  text, and the response from the user is taken to be
+	#  the User-Password.
+	#
+	#  Proxying the tunneled EAP-GTC session is a bad idea,
+	#  the user's password will go over the wire in plain-text,
+	#  for anyone to see.
+	#
+	gtc {
+		#  The default challenge, which many clients
+		#  ignore..
+		#challenge = "Password: "
+
+		#  The plain-text response which comes back
+		#  is put into a User-Password attribute,
+		#  and passed to another module for
+		#  authentication.  This allows the EAP-GTC
+		#  response to be checked against plain-text,
+		#  or crypt'd passwords.
+		#
+		#  If you say "Local" instead of "PAP", then
+		#  the module will look for a User-Password
+		#  configured for the request, and do the
+		#  authentication itself.
+		#
+		auth_type = PAP
+	}
+
+	## Common TLS configuration for TLS-based EAP types
+	#
+	#  See raddb/certs/README for additional comments
+	#  on certificates.
+	#
+	#  If OpenSSL was not found at the time the server was
+	#  built, the "tls", "ttls", and "peap" sections will
+	#  be ignored.
+	#
+	#  If you do not currently have certificates signed by
+	#  a trusted CA you may use the 'snakeoil' certificates.
+	#  Included with the server in raddb/certs.
+	#
+	#  If these certificates have not been auto-generated:
+	#    cd raddb/certs
+	#    make
+	#
+	#  These test certificates SHOULD NOT be used in a normal
+	#  deployment.  They are created only to make it easier
+	#  to install the server, and to perform some simple
+	#  tests with EAP-TLS, TTLS, or PEAP.
+	#
+	#  See also:
+	#
+	#  http://www.dslreports.com/forum/remark,9286052~mode=flat
+	#
+	#  Note that you should NOT use a globally known CA here!
+	#  e.g. using a Verisign cert as a "known CA" means that
+	#  ANYONE who has a certificate signed by them can
+	#  authenticate via EAP-TLS!  This is likely not what you want.
+	tls-config tls-common {
+		#private_key_password = 
+		private_key_file = ${certdir}/server.pem
+
+		#  If Private key & Certificate are located in
+		#  the same file, then private_key_file &
+		#  certificate_file must contain the same file
+		#  name.
+		#
+		#  If ca_file (below) is not used, then the
+		#  certificate_file below MUST include not
+		#  only the server certificate, but ALSO all
+		#  of the CA certificates used to sign the
+		#  server certificate.
+		certificate_file = ${certdir}/server.pem
+		#  Trusted Root CA list
+		#
+		#  ALL of the CA's in this list will be trusted
+		#  to issue client certificates for authentication.
+		#
+		#  In general, you should use self-signed
+		#  certificates for 802.1x (EAP) authentication.
+		#  In that case, this CA file should contain
+		#  *one* CA certificate.
+		#
+		#  This parameter is used only for EAP-TLS,
+		#  when you issue client certificates.  If you do
+		#  not use client certificates, and you do not want
+		#  to permit EAP-TLS authentication, then delete
+		#  this configuration item.
+		ca_file = ${cadir}/ca.pem
+
+		#
+		#  If OpenSSL supports TLS-PSK, then we can use
+		#  a PSK identity and (hex) password.  When the
+		#  following two configuration items are specified,
+		#  then certificate-based configuration items are
+		#  not allowed.  e.g.:
+		#
+		#	#private_key_password
+		#	private_key_file
+		#	certificate_file
+		#	ca_file
+		#	ca_path
+		#
+		#  For now, the identity is fixed, and must be the
+		#  same on the client.  The passphrase must be a hex
+		#  value, and can be up to 256 hex digits.
+		#
+		#  Future versions of the server may be able to
+		#  look up the shared key (hexphrase) based on the
+		#  identity.
+		#
+	#	psk_identity = "test"
+	#	psk_hexphrase = "036363823"
+
+		#
+		#  For DH cipher suites to work, you have to
+		#  run OpenSSL to create the DH file first:
+		#
+		#  	openssl dhparam -out certs/dh 1024
+		#
+		dh_file = ${certdir}/dh
+
+		#
+		#  If your system doesn't have /dev/urandom,
+		#  you will need to create this file, and
+		#  periodically change its contents.
+		#
+		#  For security reasons, FreeRADIUS doesn't
+		#  write to files in its configuration
+		#  directory.
+		#
+#		random_file = ${certdir}/random
+
+		#
+		#  This can never exceed the size of a RADIUS
+		#  packet (4096 bytes), and is preferably half
+		#  that, to accommodate other attributes in the
+		#  RADIUS packet.  On most APs the maximum packet
+		#  length is configured between 1500 and 1600 bytes.
+		#  In these cases, the fragment size should be
+		#  1024 or less.
+		#
+	#	fragment_size = 1024
+
+		#  include_length is a flag which is
+		#  set to "yes" by default.  If set to
+		#  yes, the total length of the message is
+		#  included in EVERY packet we send.
+		#  If set to no, the total length of the
+		#  message is included ONLY in the
+		#  first packet of a fragment series.
+		#
+	#	include_length = yes
+
+		#  Check the Certificate Revocation List
+		#
+		#  1) Copy CA certificates and CRLs to same directory.
+		#  2) Execute 'c_rehash <CA certs&CRLs Directory>'.
+		#    'c_rehash' is OpenSSL's command.
+		#  3) Uncomment the line below.
+		#  4) Restart radiusd.
+	#	check_crl = yes
+		ca_path = ${cadir}
+
+	       #
+	       #  If check_cert_issuer is set, the value will
+	       #  be checked against the DN of the issuer in
+	       #  the client certificate.  If the values do not
+	       #  match, the certificate verification will fail,
+	       #  rejecting the user.
+	       #
+	       #  In 2.1.10 and later, this check can be done
+	       #  more generally by checking the value of the
+	       #  TLS-Client-Cert-Issuer attribute.  This check
+	       #  can be done via any mechanism you choose.
+	       #
+	#       check_cert_issuer = "/C=GB/ST=Berkshire/L=Newbury/O=My Company Ltd"
+
+	       #
+	       #  If check_cert_cn is set, the value will
+	       #  be xlat'ed and checked against the CN
+	       #  in the client certificate.  If the values
+	       #  do not match, the certificate verification
+	       #  will fail rejecting the user.
+	       #
+	       #  This check is done only if the previous
+	       #  "check_cert_issuer" is not set, or if
+	       #  the check succeeds.
+	       #
+	       #  In 2.1.10 and later, this check can be done
+	       #  more generally by checking the value of the
+	       #  TLS-Client-Cert-CN attribute.  This check
+	       #  can be done via any mechanism you choose.
+	       #
+	#	check_cert_cn = %{User-Name}
+	#
+		# Set this option to specify the allowed
+		# TLS cipher suites.  The format is listed
+		# in "man 1 ciphers".
+		cipher_list = "DEFAULT"
+
+		#
+		#  Elliptic curve cryptography configuration
+		#
+		#  Only for OpenSSL >= 0.9.8.f
+		#
+		ecdh_curve = "prime256v1"
+
+		#
+		#  Session resumption / fast reauthentication
+		#  cache.
+		#
+		#  The cache contains the following information:
+		#
+		#  session Id - unique identifier, managed by SSL
+		#  User-Name  - from the Access-Accept
+		#  Stripped-User-Name - from the Access-Request
+		#  Cached-Session-Policy - from the Access-Accept
+		#
+		#  The "Cached-Session-Policy" is the name of a
+		#  policy which should be applied to the cached
+		#  session.  This policy can be used to assign
+		#  VLANs, IP addresses, etc.  It serves as a useful
+		#  way to re-apply the policy from the original
+		#  Access-Accept to the subsequent Access-Accept
+		#  for the cached session.
+		#
+		#  On session resumption, these attributes are
+		#  copied from the cache, and placed into the
+		#  reply list.
+		#
+		#  You probably also want "use_tunneled_reply = yes"
+		#  when using fast session resumption.
+		#
+		cache {
+		      #
+		      #  Enable it.  The default is "no".
+		      #  Deleting the entire "cache" subsection
+		      #  also disables caching.
+		      #
+		      #  You can disallow resumption for a
+		      #  particular user by adding the following
+		      #  attribute to the control item list:
+		      #
+		      #		Allow-Session-Resumption = No
+		      #
+		      #  If "enable = no" below, you CANNOT
+		      #  enable resumption for just one user
+		      #  by setting the above attribute to "yes".
+		      #
+		      enable = yes
+
+		      #
+		      #  Lifetime of the cached entries, in hours.
+		      #  The sessions will be deleted after this
+		      #  time.
+		      #
+		      lifetime = 24 # hours
+
+		      #
+		      #  The maximum number of entries in the
+		      #  cache.  Set to "0" for "infinite".
+		      #
+		      #  This could be set to the number of users
+		      #  who are logged in... which can be a LOT.
+		      #
+		      max_entries = 255
+
+		      #
+		      #  Internal "name" of the session cache.
+		      #  Used to distinguish which TLS context
+		      #  sessions belong to.
+		      #
+		      #  The server will generate a random value
+		      #  if unset. This will change across server
+		      #  restart so you MUST set the "name" if you
+		      #  want to persist sessions (see below).
+		      #
+		      #name = "EAP module"
+
+		      #
+		      #  Simple directory-based storage of sessions.
+		      #  Two files per session will be written, the SSL
+		      #  state and the cached VPs. This will persist session
+		      #  across server restarts.
+		      #
+		      #  The server will need write perms, and the directory
+		      #  should be secured from anyone else. You might want
+		      #  a script to remove old files from here periodically:
+		      #
+		      #    find ${logdir}/tlscache -mtime +2 -exec rm -f {} \;
+		      #
+		      #  This feature REQUIRES "name" option be set above.
+		      #
+		      #persist_dir = "${logdir}/tlscache"
+		}
+
+		#
+		#  As of version 2.1.10, client certificates can be
+		#  validated via an external command.  This allows
+		#  dynamic CRLs or OCSP to be used.
+		#
+		#  This configuration is commented out in the
+		#  default configuration.  Uncomment it, and configure
+		#  the correct paths below to enable it.
+		#
+		verify {
+			#  A temporary directory where the client
+			#  certificates are stored.  This directory
+			#  MUST be owned by the UID of the server,
+			#  and MUST not be accessible by any other
+			#  users.  When the server starts, it will do
+			#  "chmod go-rwx" on the directory, for
+			#  security reasons.  The directory MUST
+			#  exist when the server starts.
+			#
+			#  You should also delete all of the files
+			#  in the directory when the server starts.
+	#     		tmpdir = /tmp/radiusd
+
+			#  The command used to verify the client cert.
+			#  We recommend using the OpenSSL command-line
+			#  tool.
+			#
+			#  The ${..ca_path} text is a reference to
+			#  the ca_path variable defined above.
+			#
+			#  The %{TLS-Client-Cert-Filename} is the name
+			#  of the temporary file containing the cert
+			#  in PEM format.  This file is automatically
+			#  deleted by the server when the command
+			#  returns.
+	#    		client = "/path/to/openssl verify -CApath ${..ca_path} %{TLS-Client-Cert-Filename}"
+		}
+
+		#
+		#  OCSP Configuration
+		#  Certificates can be verified against an OCSP
+		#  Responder. This makes it possible to immediately
+		#  revoke certificates without the distribution of
+		#  new Certificate Revocation Lists (CRLs).
+		#
+		ocsp {
+		      #
+		      #  Enable it.  The default is "no".
+		      #  Deleting the entire "ocsp" subsection
+		      #  also disables OCSP checking.
+		      #
+		      enable = no
+
+		      #
+		      #  The OCSP Responder URL can be automatically
+		      #  extracted from the certificate in question.
+		      #  To override the OCSP Responder URL set
+		      #  "override_cert_url = yes".
+		      #
+		      override_cert_url = yes
+
+		      #
+		      #  If the OCSP Responder address is not
+		      #  extracted from the certificate, the
+		      #  URL can be defined here.
+
+		      #
+		      #  Limitation: Currently the HTTP
+		      #  request does not send the "Host:"
+		      #  header to the web server.  This
+		      #  can be a problem if the OCSP
+		      #  Responder is running as a vhost.
+		      #
+		      url = "http://127.0.0.1/ocsp/"
+
+		      #
+		      # If the OCSP Responder cannot cope with a nonce
+		      # in the request, the nonce can be disabled here.
+		      #
+		      # For security reasons, disabling this option
+		      # is not recommended as nonce protects against
+		      # replay attacks.
+		      #
+		      # Note that Microsoft AD Certificate Services OCSP
+		      # Responder does not enable nonce by default. It is
+		      # more secure to enable nonce on the responder than
+		      # to disable it in the query here.
+		      # See http://technet.microsoft.com/en-us/library/cc770413%28WS.10%29.aspx
+		      #
+		      # use_nonce = yes
+
+		      #
+		      # Number of seconds before giving up waiting
+		      # for OCSP response. 0 uses system default.
+		      #
+		      # timeout = 0
+
+		      #
+		      # Normally an error in querying the OCSP
+		      # responder (no response from server, server did
+		      # not understand the request, etc) will result in
+		      # a validation failure.
+		      #
+		      # To treat these errors as 'soft' failures and
+		      # still accept the certificate, enable this
+		      # option.
+		      #
+		      # Warning: this may enable clients with revoked
+		      # certificates to connect if the OCSP responder
+		      # is not available. Use with caution.
+		      #
+		      # softfail = no
+		}
+	}
+
+	## EAP-TLS
+	#
+	#  As of Version 3.0, the TLS configuration for TLS-based
+	#  EAP types is above in the "tls-config" section.
+	#
+	tls {
+		# Point to the common TLS configuration
+		tls = tls-common
+
+		cipher_list = "DEFAULT"
+		#
+		# As part of checking a client certificate, the EAP-TLS
+		# sets some attributes such as TLS-Client-Cert-CN. This
+		# virtual server has access to these attributes, and can
+		# be used to accept or reject the request.
+		#
+	#	virtual_server = check-eap-tls
+	}
+
+
+	## EAP-TTLS
+	#
+	#  The TTLS module implements the EAP-TTLS protocol,
+	#  which can be described as EAP inside of Diameter,
+	#  inside of TLS, inside of EAP, inside of RADIUS...
+	#
+	#  Surprisingly, it works quite well.
+	#
+	ttls {
+		#  Which tls-config section the TLS negotiation parameters
+		#  are in - see EAP-TLS above for an explanation.
+		#
+		#  In the case that an old configuration from FreeRADIUS
+		#  v2.x is being used, all the options of the tls-config
+		#  section may also appear instead in the 'tls' section
+		#  above. If that is done, the tls= option here (and in
+		#  tls above) MUST be commented out.
+		#
+		tls = tls-common
+
+		#  The tunneled EAP session needs a default EAP type
+		#  which is separate from the one for the non-tunneled
+		#  EAP module.  Inside of the TTLS tunnel, we recommend
+		#  using EAP-MD5.  If the request does not contain an
+		#  EAP conversation, then this configuration entry is
+		#  ignored.
+		#
+		default_eap_type = mschapv2
+
+		#  The tunneled authentication request does not usually
+		#  contain useful attributes like 'Calling-Station-Id',
+		#  etc.  These attributes are outside of the tunnel,
+		#  and normally unavailable to the tunneled
+		#  authentication request.
+		#
+		#  By setting this configuration entry to 'yes',
+		#  any attribute which is NOT in the tunneled
+		#  authentication request, but which IS available
+		#  outside of the tunnel, is copied to the tunneled
+		#  request.
+		#
+		#  allowed values: {no, yes}
+		#
+		copy_request_to_tunnel = no
+
+		#  The reply attributes sent to the NAS are usually
+		#  based on the name of the user 'outside' of the
+		#  tunnel (usually 'anonymous').  If you want to send
+		#  the reply attributes based on the user name inside
+		#  of the tunnel, then set this configuration entry to
+		#  'yes', and the reply to the NAS will be taken from
+		#  the reply to the tunneled request.
+		#
+		#  allowed values: {no, yes}
+		#
+		use_tunneled_reply = no
+
+		#
+		#  The inner tunneled request can be sent
+		#  through a virtual server constructed
+		#  specifically for this purpose.
+		#
+		#  If this entry is commented out, the inner
+		#  tunneled request will be sent through
+		#  the virtual server that processed the
+		#  outer requests.
+		#
+		virtual_server = "inner-tunnel"
+
+		#  This has the same meaning as, and overrides, the
+		#  same field in the "tls" configuration above.
+		#  The default value here is "yes".
+		#
+	#	include_length = yes
+
+		#
+		# Unlike EAP-TLS, EAP-TTLS does not require a client
+		# certificate. However, you can require one by setting the
+		# following option. You can also override this option by
+		# setting
+		#
+		#	EAP-TLS-Require-Client-Cert = Yes
+		#
+		# in the control items for a request.
+		#
+	#	require_client_cert = yes
+	}
+
+
+	## EAP-PEAP
+	#
+
+	##################################################
+	#
+	#  !!!!! WARNINGS for Windows compatibility  !!!!!
+	#
+	##################################################
+	#
+	#  If you see the server send an Access-Challenge,
+	#  and the client never sends another Access-Request,
+	#  then
+	#
+	#		STOP!
+	#
+	#  The server certificate has to have special OID's
+	#  in it, or else the Microsoft clients will silently
+	#  fail.  See the "scripts/xpextensions" file for
+	#  details, and the following page:
+	#
+	#	http://support.microsoft.com/kb/814394/en-us
+	#
+	#  For additional Windows XP SP2 issues, see:
+	#
+	#	http://support.microsoft.com/kb/885453/en-us
+	#
+	#
+	#  If it still doesn't work, and you're using Samba,
+	#  you may be encountering a Samba bug.  See:
+	#
+	#	https://bugzilla.samba.org/show_bug.cgi?id=6563
+	#
+	#  Note that we do not necessarily agree with their
+	#  explanation... but the fix does appear to work.
+	#
+	##################################################
+
+	#
+	#  The tunneled EAP session needs a default EAP type
+	#  which is separate from the one for the non-tunneled
+	#  EAP module.  Inside of the TLS/PEAP tunnel, we
+	#  recommend using EAP-MS-CHAPv2.
+	#
+	peap {
+		#  Which tls-config section the TLS negotiation parameters
+		#  are in - see EAP-TLS above for an explanation.
+		#
+		#  In the case that an old configuration from FreeRADIUS
+		#  v2.x is being used, all the options of the tls-config
+		#  section may also appear instead in the 'tls' section
+		#  above. If that is done, the tls= option here (and in
+		#  tls above) MUST be commented out.
+		#
+		tls = tls-common
+
+		#  The tunneled EAP session needs a default
+		#  EAP type which is separate from the one for
+		#  the non-tunneled EAP module.  Inside of the
+		#  PEAP tunnel, we recommend using MS-CHAPv2,
+		#  as that is the default type supported by
+		#  Windows clients.
+		#
+		default_eap_type = mschapv2
+
+		#  The PEAP module also has these configuration
+		#  items, which are the same as for TTLS.
+		#
+		copy_request_to_tunnel = no
+		use_tunneled_reply = no
+
+		#  When the tunneled session is proxied, the
+		#  home server may not understand EAP-MSCHAP-V2.
+		#  Set this entry to "no" to proxy the tunneled
+		#  EAP-MSCHAP-V2 as normal MSCHAPv2.
+		#
+	#	proxy_tunneled_request_as_eap = yes
+
+		#
+		#  The inner tunneled request can be sent
+		#  through a virtual server constructed
+		#  specifically for this purpose.
+		#
+		#  If this entry is commented out, the inner
+		#  tunneled request will be sent through
+		#  the virtual server that processed the
+		#  outer requests.
+		#
+		virtual_server = "inner-tunnel"
+
+		# This option enables support for MS-SoH
+		# see doc/SoH.txt for more info.
+		# It is disabled by default.
+		#
+	#	soh = yes
+
+		#
+		# The SoH reply will be turned into a request which
+		# can be sent to a specific virtual server:
+		#
+	#	soh_virtual_server = "soh-server"
+
+		#
+		# Unlike EAP-TLS, PEAP does not require a client certificate.
+		# However, you can require one by setting the following
+		# option. You can also override this option by setting
+		#
+		#	EAP-TLS-Require-Client-Cert = Yes
+		#
+		# in the control items for a request.
+		#
+	#	require_client_cert = yes
+	}
+
+	#
+	#  This takes no configuration.
+	#
+	#  Note that it is the EAP MS-CHAPv2 sub-module, not
+	#  the main 'mschap' module.
+	#
+	#  Note also that in order for this sub-module to work,
+	#  the main 'mschap' module MUST ALSO be configured.
+	#
+	#  This module is the *Microsoft* implementation of MS-CHAPv2
+	#  in EAP.  There is another (incompatible) implementation
+	#  of MS-CHAPv2 in EAP by Cisco, which FreeRADIUS does not
+	#  currently support.
+	#
+	mschapv2 {
+		#  Prior to version 2.1.11, the module never
+		#  sent the MS-CHAP-Error message to the
+		#  client.  This worked, but it had issues
+		#  when the cached password was wrong.  The
+		#  server *should* send "E=691 R=0" to the
+		#  client, which tells it to prompt the user
+		#  for a new password.
+		#
+		#  The default is to behave as in 2.1.10 and
+		#  earlier, which is known to work.  If you
+		#  set "send_error = yes", then the error
+		#  message will be sent back to the client.
+		#  This *may* help some clients work better,
+		#  but *may* also cause other clients to stop
+		#  working.
+		#
+#		send_error = no
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/echo b/src/test/setup/radius-config/freeradius/mods-available/echo
new file mode 100644
index 0000000..c21a8ff
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/echo
@@ -0,0 +1,123 @@
+# -*- text -*-
+#
+#  $Id: ad3e15933f9e85c5566810432a5fec8f23d877c1 $
+
+#
+#  This is a more general example of the execute module.
+#
+#  This one is called "echo".
+#
+#  Attribute-Name = `%{echo:/path/to/program args}`
+#
+#  If you wish to execute an external program in more than
+#  one section (e.g. 'authorize', 'pre_proxy', etc), then it
+#  is probably best to define a different instance of the
+#  'exec' module for every section.
+#
+#  The return value of the program run determines the result
+#  of the exec instance call as follows:
+#  (See doc/configurable_failover for details)
+#
+#  < 0 : fail      the module failed
+#  = 0 : ok        the module succeeded
+#  = 1 : reject    the module rejected the user
+#  = 2 : fail      the module failed
+#  = 3 : ok        the module succeeded
+#  = 4 : handled   the module has done everything to handle the request
+#  = 5 : invalid   the user's configuration entry was invalid
+#  = 6 : userlock  the user was locked out
+#  = 7 : notfound  the user was not found
+#  = 8 : noop      the module did nothing
+#  = 9 : updated   the module updated information in the request
+#  > 9 : fail      the module failed
+#
+exec echo {
+	#
+	#  Wait for the program to finish.
+	#
+	#  If we do NOT wait, then the program is "fire and
+	#  forget", and any output attributes from it are ignored.
+	#
+	#  If we are looking for the program to output
+	#  attributes, and want to add those attributes to the
+	#  request, then we MUST wait for the program to
+	#  finish, and therefore set 'wait=yes'
+	#
+	# allowed values: {no, yes}
+	wait = yes
+
+	#
+	#  The name of the program to execute, and its
+	#  arguments.  Dynamic translation is done on this
+	#  field, so things like the following example will
+	#  work.
+	#
+	program = "/bin/echo %{User-Name}"
+
+	#
+	#  The attributes which are placed into the
+	#  environment variables for the program.
+	#
+	#  Allowed values are:
+	#
+	#	request		attributes from the request
+	#	config		attributes from the configuration items list
+	#	reply		attributes from the reply
+	#	proxy-request	attributes from the proxy request
+	#	proxy-reply	attributes from the proxy reply
+	#
+	#  Note that some attributes may not exist at some
+	#  stages.  e.g. There may be no proxy-reply
+	#  attributes if this module is used in the
+	#  'authorize' section.
+	#
+	input_pairs = request
+
+	#
+	#  Where to place the output attributes (if any) from
+	#  the executed program.  The values allowed, and the
+	#  restrictions as to availability, are the same as
+	#  for the input_pairs.
+	#
+	output_pairs = reply
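+
+	#  As an illustrative sketch (the script below is an example and is
+	#  not shipped with the server): with wait = yes, input_pairs = request
+	#  and shell_escape = yes, the request attributes are passed to the
+	#  program as environment variables (User-Name becomes USER_NAME),
+	#  and any "Attribute = value" pairs the program prints to stdout
+	#  are added to the reply list.  For example, a script could contain:
+	#
+	#	#!/bin/sh
+	#	echo "Reply-Message = \"Hello $USER_NAME\""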
+
+	#
+	#  When to execute the program.  If the packet
+	#  type does NOT match what's listed here, then
+	#  the module does NOT execute the program.
+	#
+	#  For a list of allowed packet types, see
+	#  the 'dictionary' file, and look for VALUEs
+	#  of the Packet-Type attribute.
+	#
+	#  By default, the module executes on ANY packet.
+	#  Uncomment the following line to tell the
+	#  module to execute only if an Access-Accept is
+	#  being sent to the NAS.
+	#
+	#packet_type = Access-Accept
+
+	#
+	#  Should we escape the environment variables?
+	#
+	#  If this is set, all the RADIUS attributes
+	#  are capitalised and dashes replaced with
+	#  underscores. Also, RADIUS values are surrounded
+	#  with double-quotes.
+	#
+	#  That is to say: User-Name=BobUser => USER_NAME="BobUser"
+	shell_escape = yes
+
+	#
+	#  How long should we wait for the program to finish?
+	#
+	#  Default is 10 seconds, which should be plenty for nearly
+	#  anything. Range is 1 to 30 seconds. You are strongly
+	#  encouraged to NOT increase this value. Decreasing can
+	#  be used to cause authentication to fail sooner when you
+	#  know it's going to fail anyway due to the time taken,
+	#  thereby saving resources.
+	#
+	#timeout = 10
+
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/etc_group b/src/test/setup/radius-config/freeradius/mods-available/etc_group
new file mode 100644
index 0000000..6aea41b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/etc_group
@@ -0,0 +1,28 @@
+# -*- text -*-
+#
+#  $Id: f58b72f560ba067991d67295b546691bcd992d44 $
+
+#  "passwd" configuration, for the /etc/group file. Adds a Etc-Group-Name
+#  attribute for every group that the user is member of.
+#
+#  You will have to define the Etc-Group-Name in the 'dictionary' file
+#  as a 'string' type.
+#
+#  The Group and Group-Name attributes are automatically created by
+#  the Unix module, and do checking against /etc/group automatically.
+#  This means that you CANNOT use Group or Group-Name to do any other
+#  kind of grouping in the server.  You MUST define a new group
+#  attribute.
+#
+#  i.e. this module should NOT be used as-is, but should be edited to
+#  point to a different group file.
+#
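+#  As a sketch (the attribute number 3000 is an arbitrary example), the
+#  corresponding dictionary entry could look like:
+#
+#	ATTRIBUTE	Etc-Group-Name	3000	string
+#
+#  and the attribute could then be used as a check item, e.g. in the
+#  users file:
+#
+#	DEFAULT	Etc-Group-Name == "wheel"
+#		Reply-Message = "Hello, admin"
+#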
+passwd etc_group {
+	filename = /etc/group
+	format = "=Etc-Group-Name:::*,User-Name"
+	hash_size = 50
+	ignore_nislike = yes
+	allow_multiple_keys = yes
+	delimiter = ":"
+}
+
diff --git a/src/test/setup/radius-config/freeradius/mods-available/exec b/src/test/setup/radius-config/freeradius/mods-available/exec
new file mode 100644
index 0000000..470b9cb
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/exec
@@ -0,0 +1,30 @@
+# -*- text -*-
+#
+#  $Id: 5f21e4350f091ed51813865a31b2796c4b487f9f $
+
+#
+#  Execute external programs
+#
+#  This module is useful only for 'xlat'.  To use it,
+#  put 'exec' into the 'instantiate' section.  You can then
+#  do dynamic translation of attributes like:
+#
+#  Attribute-Name = `%{exec:/path/to/program args}`
+#
+#  The value of the attribute will be replaced with the output
+#  of the program which is executed.  Due to RADIUS protocol
+#  limitations, any output over 253 bytes will be ignored.
+#
+#  The RADIUS attributes from the user request will be placed
+#  into environment variables of the executed program, as
+#  described in "man unlang" and in doc/variables.txt
+#
+#  See also "echo" for more sample configuration.
+#
+exec {
+	wait = no
+	input_pairs = request
+	shell_escape = yes
+	output = none
+	timeout = 10
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/expiration b/src/test/setup/radius-config/freeradius/mods-available/expiration
new file mode 100644
index 0000000..dfc0550
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/expiration
@@ -0,0 +1,13 @@
+# -*- text -*-
+#
+#  $Id: 5d06454d0a8ccce7f50ddf7b01ba01c4ace6560a $
+
+#
+# The expiration module.  This handles the Expiration attribute.
+# It should be included at the *end* of the authorize section
+# in order to handle user Expiration.  It should also be included
+# in the instantiate section in order to register the Expiration
+# compare function.
+#
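+#  For example (a sketch of the virtual server configuration, not part
+#  of this file):
+#
+#	authorize {
+#		...
+#		expiration
+#	}
+#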
+expiration {
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/expr b/src/test/setup/radius-config/freeradius/mods-available/expr
new file mode 100644
index 0000000..ab7de2d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/expr
@@ -0,0 +1,26 @@
+# -*- text -*-
+#
+#  $Id: 1e130ef24a4dbcd55f347ebd799a8b2bf4f3333a $
+
+#
+#  This module is useful only for 'xlat'.  To use it,
+#  put 'expr' into the 'instantiate' section.  You can then
+#  do dynamic translation of attributes like:
+#
+#  Attribute-Name = `%{expr:2 + 3 + %{exec: id -u}}`
+#
+#  The value of the attribute will be replaced with the output
+#  of the program which is executed.  Due to RADIUS protocol
+#  limitations, any output over 253 bytes will be ignored.
+#
+#  The module also registers a few paircompare functions, and
+#  many string manipulation functions.
+#
+
+expr {
+	#
+	# Characters that will not be encoded by the %{encode}
+	# xlat function.
+	#
+	safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/files b/src/test/setup/radius-config/freeradius/mods-available/files
new file mode 100644
index 0000000..0e92702
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/files
@@ -0,0 +1,30 @@
+# -*- text -*-
+#
+#  $Id: c14992f05c13983fa0c0f9bcead4c1cf0c1bb801 $
+
+# Livingston-style 'users' file
+#
+# See "man users" for more information.
+#
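+# As a sketch (the user name and password below are placeholders), an
+# entry in the "authorize" file read by this module might look like:
+#
+#	bob	Cleartext-Password := "hello"
+#		Reply-Message = "Hello, %{User-Name}"
+#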
+files {
+	# Search for files in a subdirectory of mods-config which
+	# matches this instance of the files module.
+	moddir = ${modconfdir}/${.:instance}
+
+	# The default key attribute to use for matches.  The content
+	# of this attribute is used to match the "name" of the
+	# entry.
+	#key = "%{%{Stripped-User-Name}:-%{User-Name}}"
+
+	#  The old "users" style file is now located here.
+	filename = ${moddir}/authorize
+
+	#  This is accepted for backwards compatibility
+	#  It will be removed in a future release.
+	usersfile = ${moddir}/authorize
+
+	#  These are accepted for backwards compatibility.
+	#  They will be renamed in a future release.
+	acctusersfile = ${moddir}/accounting
+	preproxy_usersfile = ${moddir}/pre-proxy
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/idn b/src/test/setup/radius-config/freeradius/mods-available/idn
new file mode 100644
index 0000000..31874c5
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/idn
@@ -0,0 +1,28 @@
+# -*- text -*-
+#
+#  $Id: 534054077d52a7bb0bf8e02c1e861e5c86b76df9 $
+
+#
+# Internationalised domain names.
+#
+
+#  The expansion string: %{idn: example.com} results in an ASCII
+#  punycode version of the domain name.  That version can then be used
+#  for name comparisons.  Using an i18n version of the name is NOT
+#  RECOMMENDED, as that version is not canonical.
+#
+#  i.e. the "same" domain name can be represented in many, many,
+#  different ways.  Only the idn version has *one* representation.
+#
+idn {
+	#
+	#  Allow use of unassigned Unicode code points.
+	#
+	allow_unassigned = no
+
+	#
+	#  Prohibit underscores and other invalid characters in domain
+	#  names.
+	use_std3_ascii_rules = yes
+
+}
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-available/inner-eap b/src/test/setup/radius-config/freeradius/mods-available/inner-eap
new file mode 100644
index 0000000..9eed1ce
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/inner-eap
@@ -0,0 +1,94 @@
+# -*- text -*-
+#
+#  $Id: 9a690b77c2eaea1086d9748012c380283714f452 $
+
+#
+#  Sample configuration for an EAP module that occurs *inside*
+#  of a tunneled method.  It is used to limit the EAP types that
+#  can occur inside of the inner tunnel.
+#
+#  See also raddb/sites-available/inner-tunnel
+#
+#  See raddb/mods-available/eap for full documentation on the meaning of these
+#  configuration entries.
+#
+eap inner-eap {
+	# This is the best choice for PEAP.
+	default_eap_type = mschapv2
+
+	timer_expire     = 60
+
+	#  This should be the same as the outer eap "max sessions"
+	max_sessions = 2048
+
+	# Supported EAP-types
+	md5 {
+	}
+
+	gtc {
+		#  The default challenge, which many clients
+		#  ignore..
+		#challenge = "Password: "
+
+		auth_type = PAP
+	}
+
+	mschapv2 {
+		# See eap for documentation
+#		send_error = no
+	}
+
+	# No TTLS or PEAP configuration should be listed here.
+
+	## EAP-TLS
+	#
+	#  You SHOULD use different certificates than are used
+	#  for the outer EAP configuration!
+	#
+	#  Support for PEAP/TLS and RFC 5176 TLS/TLS is experimental.
+	#  It might work, or it might not.
+	#
+	tls {
+		private_key_password = whatever
+		private_key_file = ${certdir}/inner-server.pem
+
+		#  If Private key & Certificate are located in
+		#  the same file, then private_key_file &
+		#  certificate_file must contain the same file
+		#  name.
+		#
+		#  If ca_file (below) is not used, then the
+		#  certificate_file below MUST include not
+		#  only the server certificate, but ALSO all
+		#  of the CA certificates used to sign the
+		#  server certificate.
+		certificate_file = ${certdir}/inner-server.pem
+
+		#  You may want different CAs for inner and outer
+		#  certificates.  If so, edit this file.
+		ca_file = ${cadir}/ca.pem
+
+		cipher_list = "DEFAULT"
+
+		#  You may want to set a very small fragment size.
+		#  The TLS data here needs to go inside of the
+		#  outer EAP-TLS protocol.
+		#
+		#  Try values and see if they work...
+	#	fragment_size = 1024
+
+		#  Other needful things
+		dh_file = ${certdir}/dh
+		random_file = ${certdir}/random
+
+		#  CRL and OCSP things go here.  See the main "eap"
+		#  file for details.
+	#	check_crl = yes
+	#	ca_path = /path/to/directory/with/ca_certs/and/crls/
+
+		#
+		#  The session resumption / fast re-authentication
+		#  cache CANNOT be used for inner sessions.
+		#
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/ippool b/src/test/setup/radius-config/freeradius/mods-available/ippool
new file mode 100644
index 0000000..8b263bd
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/ippool
@@ -0,0 +1,66 @@
+# -*- text -*-
+#
+#  $Id: 1d3305ba45ec71336f55f8f1db05f183772e1b82 $
+
+#  Do server side ip pool management. Should be added in
+#  post-auth and accounting sections.
+#
+#  The module also requires the existence of the Pool-Name
+#  attribute. That way the administrator can add the Pool-Name
+#  attribute in the user profiles and use different pools for
+#  different users. The Pool-Name attribute is a *check* item
+#  not a reply item.
+#
+#  The Pool-Name should be set to the ippool module instance
+#  name or to DEFAULT to match any module.
+
+#
+# Example:
+# radiusd.conf: ippool students { [...] }
+#		ippool teachers { [...] }
+# users file  : DEFAULT Group == students, Pool-Name := "students"
+#		DEFAULT Group == teachers, Pool-Name := "teachers"
+#		DEFAULT	Group == other, Pool-Name := "DEFAULT"
+#
+# Note: If you change the range parameters you must then erase the
+#       db files.
+#
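+#  As a sketch, the instance is then listed in the relevant sections of
+#  the virtual server, e.g.:
+#
+#	post-auth {
+#		main_pool
+#	}
+#	accounting {
+#		main_pool
+#	}
+#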
+ippool main_pool {
+	#  The main db file used to allocate addresses.
+	filename = ${db_dir}/db.ippool
+
+	#  The start and end ip addresses for this pool.
+	range_start = 192.0.2.1
+	range_stop = 192.0.2.254
+
+	#  The network mask used for this pool.
+	netmask = 255.255.255.0
+
+	#  The gdbm cache size for the db files. Should
+	#  be equal to the number of ip's available in
+	#  the ip pool
+	cache_size = 800
+
+	#  Helper db index file used in multilink
+	ip_index = ${db_dir}/db.ipindex
+
+	#  If set, the Framed-IP-Address already in the
+	#  reply (if any) will be discarded, and replaced
+	#  with a Framed-IP-Address assigned here.
+	override = no
+
+	#  Specifies the maximum time in seconds that an
+	#  entry may be active.  If set to zero, it means
+	#  "no timeout".  The default value is 0.
+	maximum_timeout = 0
+
+	#  The key to use for the session database (which
+	#  holds the allocated IPs).  Normally it should
+	#  just be the NAS IP/port (which is the default).
+	#
+	#  If your NAS sends the same value of NAS-Port
+	#  in all requests, the key should be based on some
+	#  other attribute that is in ALL requests, AND
+	#  is unique to each machine needing an IP address.
+#	key = "%{NAS-IP-Address} %{NAS-Port}"
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/krb5 b/src/test/setup/radius-config/freeradius/mods-available/krb5
new file mode 100644
index 0000000..eaadd9f
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/krb5
@@ -0,0 +1,59 @@
+# -*- text -*-
+#
+#  $Id: d17b8b8fb8b442869e4aff143d345168875c55c8 $
+
+#
+#  Kerberos.  See doc/rlm_krb5 for minimal docs.
+#
+krb5 {
+	keytab = /path/to/keytab
+	service_principal = name_of_principle
+
+	#  Pool of krb5 contexts, this allows us to make the module multithreaded
+	#  and to avoid expensive operations like resolving and opening keytabs
+	#  on every request.  It may also allow TCP connections to the KDC to be
+	#  cached if that is supported by the version of libkrb5 used.
+	#
+	#  The context pool is only used if the underlying libkrb5 reported
+	#  that it was thread safe at compile time.
+	pool {
+		# Number of contexts to create
+		start = 10
+
+		# Minimum number of contexts to keep available
+		min = 4
+
+		# Maximum number of contexts
+		#
+		# If these contexts are all in use and a new one
+		# is requested, the request will NOT get a connection.
+		max = 10
+
+		# Spare contexts to be left idle
+		#
+		# NOTE: Idle contexts WILL be closed if "idle_timeout"
+		# is set.
+		spare = 3
+
+		# Number of uses before the context is freed
+		# 0 means "infinite"
+		uses = 0
+
+		# The lifetime (in seconds) of the context
+		lifetime = 0
+
+		# idle timeout (in seconds).  A context which is
+		# unused for this length of time will be freed.
+		idle_timeout = 60
+
+		# NOTE: All configuration settings are enforced.  If a
+		# context is closed because of "idle_timeout",
+		# "uses", or "lifetime", then the total number of
+		# contexts MAY fall below "min".  When that
+		# happens, it will create a new context.  It will
+		# also log a WARNING message.
+		#
+		# The solution is to either lower the "min" contexts,
+		# or increase lifetime/idle_timeout.
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/ldap b/src/test/setup/radius-config/freeradius/mods-available/ldap
new file mode 100644
index 0000000..8b9e667
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/ldap
@@ -0,0 +1,468 @@
+# -*- text -*-
+#
+#  $Id: af3f155ff51f4ebe7bfaffcb55a23238f128e843 $
+
+#
+#  Lightweight Directory Access Protocol (LDAP)
+#
+ldap {
+	#  Note that this needs to match the name(s) in the LDAP server
+	#  certificate, if you're using ldaps.  See OpenLDAP documentation
+	#  for the behavioral semantics of specifying more than one host.
+	server = "ldap.rrdns.example.org ldap.rrdns.example.org ldap.example.org"
+
+	#  Port to connect on, defaults to 389. Setting this to 636 will enable
+	#  LDAPS if start_tls (see below) is not able to be used.
+#	port = 389
+
+	#  Administrator account for searching and possibly modifying.
+#	identity = "cn=admin,dc=example,dc=org"
+#	password = mypass
+
+	#  Unless overridden in another section, the dn from which all
+	#  searches will start from.
+#	base_dn = "dc=example,dc=org"
+
+	#
+	#  Generic valuepair attribute
+	#
+	
+	#  If set, this attribute will be retrieved in addition to any
+	#  mapped attributes.
+	#
+	#  Values should be in the format:
+	#  	<radius attr> <op> <value>
+	#
+	#  Where:
+	#  	<radius attr>:	Is the attribute you wish to create
+	# 			with any valid list and request qualifiers.
+	#  	<op>: 		Is any assignment operator (=, :=, +=, -=).
+	#  	<value>:	Is the value to parse into the new valuepair.
+	# 			If the attribute name is wrapped in double
+	# 			quotes it will be xlat expanded.
+#	valuepair_attribute = "radiusAttribute"
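+
+	#  As an illustrative sketch (the attribute name above and the value
+	#  below are examples only), a directory entry could then carry a
+	#  value such as:
+	#
+	#  	radiusAttribute: reply:Session-Timeout := 3600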
+
+	#
+	#  Mapping of LDAP directory attributes to RADIUS dictionary attributes.
+	#
+	
+	#  WARNING: Although this format is almost identical to the unlang
+	#  update section format, it does *NOT* mean that you can use other
+	#  unlang constructs in module configuration files.
+	#
+	#  Configuration items are in the format:
+	# 	<radius attr> <op> <ldap attr>
+	#
+	#  Where:
+	#  	<radius attr>:	Is the destination RADIUS attribute
+	# 			with any valid list and request qualifiers.
+	#  	<op>: 		Is any assignment operator (=, :=, +=, -=).
+	#  	<ldap attr>:	Is the attribute associated with user or
+	#			profile objects in the LDAP directory.
+	# 			If the attribute name is wrapped in double
+	# 			quotes it will be xlat expanded.
+	#
+	#  Request and list qualifiers may also be placed after the 'update'
+	#  section name to set default destination requests/lists
+	#  for unqualified RADIUS attributes.
+	#
+	#  Note: LDAP attribute names should be single quoted unless you want
+	#  the name value to be derived from an xlat expansion, or an
+	#  attribute ref.
+	update {
+		control:Password-With-Header	+= 'userPassword'
+#		control:NT-Password		:= 'ntPassword'
+#		reply:Reply-Message		:= 'radiusReplyMessage'
+#		reply:Tunnel-Type		:= 'radiusTunnelType'
+#		reply:Tunnel-Medium-Type	:= 'radiusTunnelMediumType'
+#		reply:Tunnel-Private-Group-ID	:= 'radiusTunnelPrivategroupId'
+
+		#  These are provided for backwards compatibility.
+		#  Where only a list is specified as the RADIUS attribute,
+		#  the value of the LDAP attribute is parsed as a valuepair
+		#  in the same format as the 'valuepair_attribute' (above).
+#		control:			+= 'radiusCheckAttributes'
+#		reply:				+= 'radiusReplyAttributes'
+	}
+
+	#  Set to yes if you have eDirectory and want to use the universal
+	#  password mechanism.
+#	edir = no
+
+	#  Set to yes if you want to bind as the user after retrieving the
+	#  Cleartext-Password. This will consume the login grace, and
+	#  verify user authorization.
+#	edir_autz = no
+
+	#  Note: set_auth_type was removed in v3.x.x
+	#  Equivalent functionality can be achieved by adding the following
+	#  stanza to the authorize {} section of your virtual server.
+	#
+	#    ldap
+	#    if ((ok || updated) && User-Password) {
+	#        update {
+	#            control:Auth-Type := ldap
+	#        }
+	#    }
+	
+	#
+	#  User object identification.
+	#
+	user {
+		#  Where to start searching in the tree for users
+		base_dn = "${..base_dn}"
+
+		#  Filter for user objects, should be specific enough
+		#  to identify a single user object.
+		filter = "(uid=%{%{Stripped-User-Name}:-%{User-Name}})"
+
+		#  Search scope, may be 'base', 'one', 'sub' or 'children'
+#		scope = 'sub'
+
+		#  If this is undefined, anyone is authorised.
+		#  If it is defined, the contents of this attribute
+		#  determine whether or not the user is authorised
+#		access_attribute = "dialupAccess"
+
+		#  Control whether the presence of "access_attribute"
+		#  allows access, or denies access.
+		#
+		#  If "yes", and the access_attribute is present, or
+		#  "no" and the access_attribute is absent then access
+		#  will be allowed.
+		#
+		#  If "yes", and the access_attribute is absent, or
+		#  "no" and the access_attribute is present, then
+		#  access will not be allowed.
+		#
+		#  If the value of the access_attribute is "false", it
+		#  will negate the result.
+		#
+		#  e.g.
+		#    access_positive = yes
+		#    access_attribute = userAccessAllowed
+		#
+		#    userAccessAllowed = false
+		#
+		#  Will result in the user being locked out.
+#		access_positive = yes
+	}
+
+	#
+	#  User membership checking.
+	#
+	group {
+		#  Where to start searching in the tree for groups
+		base_dn = "${..base_dn}"
+
+		#  Filter for group objects, should match all available
+		#  group objects a user might be a member of.
+		filter = "(objectClass=posixGroup)"
+
+		# Search scope, may be 'base', 'one', 'sub' or 'children'
+#		scope = 'sub'
+
+		#  Attribute that uniquely identifies a group.
+		#  Is used when converting group DNs to group
+		#  names.
+#		name_attribute = cn
+
+		#  Filter to find group objects a user is a member of.
+		#  That is, group objects with attributes that
+		#  identify members (the inverse of membership_attribute).
+#		membership_filter = "(|(member=%{control:Ldap-UserDn})(memberUid=%{%{Stripped-User-Name}:-%{User-Name}}))"
+
+		#  The attribute in user objects which contains the names
+		#  or DNs of groups a user is a member of.
+		#
+		#  Unless a conversion between group name and group DN is
+		#  needed, there's no requirement for the group objects
+		#  referenced to actually exist.
+		membership_attribute = "memberOf"
+
+		#  If cacheable_name or cacheable_dn are enabled,
+		#  all group information for the user will be
+		#  retrieved from the directory and written to LDAP-Group
+		#  attributes appropriate for the instance of rlm_ldap.
+		#
+		#  For group comparisons these attributes will be checked
+		#  instead of querying the LDAP directory directly.
+		#
+		#  This feature is intended to be used with rlm_cache.
+		#
+		#  If you wish to use this feature, you should enable
+		#  the type that matches the format of your check items
+		#  i.e. if your groups are specified as DNs then enable
+		#  cacheable_dn else enable cacheable_name.
+#		cacheable_name = "no"
+#		cacheable_dn = "no"
+
+		#  Override the normal cache attribute (<inst>-LDAP-Group)
+		#  and create a custom attribute.  This can help if multiple
+		#  module instances are used in fail-over.
+#		cache_attribute = "LDAP-Cached-Membership"
+	}
+
+	#
+	#  User profiles. RADIUS profile objects contain sets of attributes
+	#  to insert into the request. These attributes are mapped using
+	#  the same mapping scheme applied to user objects.
+	#
+	profile {
+		#  Filter for RADIUS profile objects
+#		filter = "(objectclass=radiusprofile)"
+
+		#  The default profile applied to all users.
+#		default = "cn=radprofile,dc=example,dc=org"
+
+		#  The list of profiles which are applied (after the default)
+		#  to all users.
+		#  The "User-Profile" attribute in the control list
+		#  will override this setting at run-time.
+#		attribute = "radiusProfileDn"
+	}
+
+	#
+	#  Bulk load clients from the directory
+	#
+	client {
+		#   Where to start searching in the tree for clients
+		base_dn = "${..base_dn}"
+
+		#
+		#  Filter to match client objects
+		#
+		filter = '(objectClass=frClient)'
+
+		# Search scope, may be 'base', 'one', 'sub' or 'children'
+#		scope = 'sub'
+
+		#
+		#  Client attribute mappings are in the format:
+		#      <client attribute> = <ldap attribute>
+		#
+		#  Arbitrary attributes (accessible by %{client:<attr>}) are not yet supported.
+		#
+		#  The following attributes are required:
+		#    * identifier - IPv4 address, or IPv4 address with prefix, or hostname.
+		#    * secret - RADIUS shared secret.
+		#
+		#  The following attributes are optional:
+		#    * shortname - Friendly name associated with the client
+		#    * nas_type - NAS Type
+		#    * virtual_server - Virtual server to associate the client with
+		#    * require_message_authenticator - Whether we require the Message-Authenticator
+		#      attribute to be present in requests from the client.
+		#
+		#  Schemas are available in doc/schemas/ldap for openldap and eDirectory
+		#
+		attribute {
+			identifier			= 'radiusClientIdentifier'
+			secret				= 'radiusClientSecret'
+#			shortname			= 'radiusClientShortname'
+#			nas_type			= 'radiusClientType'
+#			virtual_server			= 'radiusClientVirtualServer'
+#			require_message_authenticator	= 'radiusClientRequireMa'
+		}
+	}
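+
+	#  Illustrative only: with the filter and attribute mappings above, a
+	#  matching directory entry might look like the following (the DN
+	#  layout and values are hypothetical examples):
+	#
+	#	dn: radiusClientIdentifier=192.0.2.1,ou=clients,dc=example,dc=org
+	#	objectClass: frClient
+	#	radiusClientIdentifier: 192.0.2.1
+	#	radiusClientSecret: testing123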
+
+	#  Load clients on startup
+#	read_clients = no
+
+	#
+	#  Modify user object on receiving Accounting-Request
+	#
+
+	#  Useful for recording things like the last time the user logged
+	#  in, or the Acct-Session-ID for CoA/DM.
+	#
+	#  LDAP modification items are in the format:
+	# 	<ldap attr> <op> <value>
+	#
+	#  Where:
+	#  	<ldap attr>:	The LDAP attribute to add, modify or delete.
+	#  	<op>: 		One of the assignment operators:
+	#			(:=, +=, -=, ++).
+	#			Note: '=' is *not* supported.
+	#  	<value>:	The value to add, modify or delete.
+	#
+	#  WARNING: If using the ':=' operator with a multi-valued LDAP
+	#  attribute, all instances of the attribute will be removed and
+	#  replaced with a single attribute.
+	accounting {
+		reference = "%{tolower:type.%{Acct-Status-Type}}"
+
+		type {
+			start {
+				update {
+					description := "Online at %S"
+				}
+			}
+
+			interim-update {
+				update {
+					description := "Last seen at %S"
+				}
+			}
+
+			stop {
+				update {
+					description := "Offline at %S"
+				}
+			}
+		}
+	}
+
+	#
+	#  Post-Auth can modify LDAP objects too
+	#
+	post-auth {
+		update {
+			description := "Authenticated at %S"
+		}
+	}
+
+	#
+	#  LDAP connection-specific options.
+	#
+	#  These options set timeouts, keep-alives, etc. for the connections.
+	#
+	options {
+		#
+		#  The following two configuration items are for Active Directory
+		#  compatibility.  If you set these to "no", then searches
+		#  will likely return "operations error", instead of a
+		#  useful result.
+		#
+		chase_referrals = yes
+		rebind = yes
+
+		#  Seconds to wait for LDAP query to finish. default: 20
+		timeout = 10
+
+		#  Seconds LDAP server has to process the query (server-side
+		#  time limit). default: 20
+		#
+		#  LDAP_OPT_TIMELIMIT is set to this value.
+		timelimit = 3
+
+		#  Seconds to wait for response of the server. (network
+		#  failures) default: 10
+		#
+		#  LDAP_OPT_NETWORK_TIMEOUT is set to this value.
+		net_timeout = 1
+
+		#  LDAP_OPT_X_KEEPALIVE_IDLE
+		idle = 60
+
+		#  LDAP_OPT_X_KEEPALIVE_PROBES
+		probes = 3
+
+		#  LDAP_OPT_X_KEEPALIVE_INTERVAL
+		interval = 3
+
+		#  ldap_debug: debug flag for LDAP SDK
+		#  (see OpenLDAP documentation).  Set this to enable
+		#  huge amounts of LDAP debugging on the screen.
+		#  You should only use this if you are an LDAP expert.
+		#
+		#	default: 0x0000 (no debugging messages)
+		#	Example:(LDAP_DEBUG_FILTER+LDAP_DEBUG_CONNS)
+		ldap_debug = 0x0028
+	}
+
+	#
+	#  This subsection configures the tls related items
+	#  that control how FreeRADIUS connects to an LDAP
+	#  server.  It contains all of the "tls_*" configuration
+	#  entries used in older versions of FreeRADIUS.  Those
+	#  configuration entries can still be used, but we recommend
+	#  using these.
+	#
+	tls {
+		# Set this to 'yes' to use TLS encrypted connections
+		# to the LDAP database by using the StartTLS extended
+		# operation.
+		#
+		# The StartTLS operation is supposed to be
+		# used with normal ldap connections instead of
+		# using ldaps (port 636) connections
+#		start_tls = yes
+
+#		ca_file	= ${certdir}/cacert.pem
+
+#		ca_path	= ${certdir}
+#		certificate_file = /path/to/radius.crt
+#		private_key_file = /path/to/radius.key
+#		random_file = ${certdir}/random
+
+		#  Certificate Verification requirements.  Can be:
+		#    "never" (don't even bother trying)
+		#    "allow" (try, but don't fail if the certificate
+		#		can't be verified)
+		#    "demand" (fail if the certificate doesn't verify.)
+		#
+		#  The default is "allow"
+#		require_cert	= "demand"
+	}
+
+
+	#  As of version 3.0, the "pool" section has replaced the
+	#  following configuration items:
+	#
+	#  ldap_connections_number
+
+	#  The connection pool is new for 3.0, and will be used in many
+	#  modules, for all kinds of connection-related activity.
+	#
+	#  When the server is not threaded, the connection pool
+	#  limits are ignored, and only one connection is used.
+	pool {
+		#  Number of connections to start
+		start = 5
+
+		#  Minimum number of connections to keep open
+		min = 4
+
+		#  Maximum number of connections
+		#
+		#  If these connections are all in use and a new one
+		#  is requested, the request will NOT get a connection.
+		#
+		#  Setting 'max' to LESS than the number of threads means
+		#  that some threads may starve, and you will see errors
+		#  like "No connections available and at max connection limit"
+		#
+		#  Setting 'max' to MORE than the number of threads means
+		#  that there are more connections than necessary.
+		max = ${thread[pool].max_servers}
+
+		#  Spare connections to be left idle
+		#
+		#  NOTE: Idle connections WILL be closed if "idle_timeout"
+		#  is set.
+		spare = 3
+
+		#  Number of uses before the connection is closed
+		#
+		#  0 means "infinite"
+		uses = 0
+
+		#  The lifetime (in seconds) of the connection
+		lifetime = 0
+
+		#  Idle timeout (in seconds).  A connection which is
+		#  unused for this length of time will be closed.
+		idle_timeout = 60
+
+		#  NOTE: All configuration settings are enforced.  If a
+		#  connection is closed because of "idle_timeout",
+		#  "uses", or "lifetime", then the total number of
+		#  connections MAY fall below "min".  When that
+		#  happens, it will open a new connection.  It will
+		#  also log a WARNING message.
+		#
+		#  The solution is to either lower the "min" connections,
+		#  or increase lifetime/idle_timeout.
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/linelog b/src/test/setup/radius-config/freeradius/mods-available/linelog
new file mode 100644
index 0000000..d1b68bf
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/linelog
@@ -0,0 +1,113 @@
+# -*- text -*-
+#
+#  $Id: 779752bc10c156ba1981810186a4af828a18c014 $
+
+#
+#  The "linelog" module will log one line of text to a file.
+#  Both the filename and the line of text are dynamically expanded.
+#
+#  We STRONGLY suggest that you do not use data from the
+#  packet as part of the filename.
+#
+linelog {
+	#
+	#  The file where the logs will go.
+	#
+	#  If the filename is "syslog", then the log messages will
+	#  go to syslog.
+	filename = ${logdir}/linelog
+
+	#
+	#  The Unix-style permissions on the log file.
+	#
+	#  Depending on format string, the log file may contain secret or
+	#  private information about users.  Keep the file permissions as
+	#  restrictive as possible.
+	permissions = 0600
+
+	#
+	# The Unix group which owns the log file.
+	#
+	# The user that freeradius runs as must be in the specified
+	# group, otherwise it will not be possible to set the group.
+	#
+	# group = ${security.group}
+
+	#
+	# If logging via syslog, the facility can be set here. Otherwise
+	# the syslog_facility option in radiusd.conf will be used.
+	#
+	# syslog_facility = daemon
+
+	#
+	#  The default format string.
+	format = "This is a log message for %{User-Name}"
+
+	#
+	#  This next line can be omitted.  If it is omitted, then
+	#  the log message is static, and is always given by "format",
+	#  above.
+	#
+	#  If it is defined, then the string is dynamically expanded,
+	#  and the result is used to find another configuration entry
+	#  here, with the given name.  That name is then used as the
+	#  format string.
+	#
+	#  If the configuration entry cannot be found, then no log
+	#  message is printed.
+	#
+	#  i.e. You can have many log messages in one "linelog" module.
+	#  If this two-step expansion did not exist, you would need
+	#  to configure one "linelog" module for each log message.
+
+	#
+	#  Reference the Packet-Type (Access-Request, etc.)  If it doesn't
+	#  exist, reference the "format" entry, above.
+	reference = "messages.%{%{Packet-Type}:-default}"
+
+	#
+	#  The messages defined here are taken from the "reference"
+	#  expansion, above.
+	#
+	messages {
+		default = "Unknown packet type %{Packet-Type}"
+
+		Access-Request = "Requested access: %{User-Name}"
+		Access-Reject = "Rejected access: %{User-Name}"
+		Access-Challenge = "Sent challenge: %{User-Name}"
+	}
+}
+
+#
+#  Another example, for accounting packets.
+#
+linelog log_accounting {
+	#
+	#  Used if the expansion of "reference" fails.
+	#
+	format = ""
+
+	filename = ${logdir}/linelog-accounting
+
+	permissions = 0600
+
+	reference = "Accounting-Request.%{%{Acct-Status-Type}:-unknown}"
+
+	#
+	#  Another example:
+	#      
+	#
+	Accounting-Request {
+		Start = "Connect: [%{User-Name}] (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} ip %{Framed-IP-Address})"
+		Stop = "Disconnect: [%{User-Name}] (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} ip %{Framed-IP-Address}) %{Acct-Session-Time} seconds"
+
+		#  Don't log anything for these packets.
+		Alive = ""
+
+		Accounting-On = "NAS %{Packet-Src-IP-Address} (%{NAS-IP-Address}) just came online"
+		Accounting-Off = "NAS %{Packet-Src-IP-Address} (%{NAS-IP-Address}) just went offline"
+
+		# Log a note for any other Acct-Status-Type.
+		unknown = "NAS %{Packet-Src-IP-Address} (%{NAS-IP-Address}) sent unknown Acct-Status-Type %{Acct-Status-Type}"
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/logintime b/src/test/setup/radius-config/freeradius/mods-available/logintime
new file mode 100644
index 0000000..d4f6f3e
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/logintime
@@ -0,0 +1,23 @@
+# -*- text -*-
+#
+#  $Id: 25344527759d22b49b5e990fd83f0e506442fa76 $
+
+# The logintime module. This handles the Login-Time,
+# Current-Time, and Time-Of-Day attributes.  It should be
+# included in the *end* of the authorize section in order to
+# handle Login-Time checks. It should also be included in the
+# instantiate section in order to register the Current-Time
+# and Time-Of-Day comparison functions.
+#
+# When the Login-Time attribute is set to some value, and the
+# user has been permitted to log in, a Session-Timeout is
+# calculated based on the remaining time.  See "doc/README".
+#
+logintime {
+	# The minimum timeout (in seconds) a user is allowed
+	# to have. If the calculated timeout is lower we don't
+	# allow the login. Some NAS do not handle values
+	# lower than 60 seconds well.
+	minimum_timeout = 60
+}
+
diff --git a/src/test/setup/radius-config/freeradius/mods-available/mac2ip b/src/test/setup/radius-config/freeradius/mods-available/mac2ip
new file mode 100644
index 0000000..5d646af
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/mac2ip
@@ -0,0 +1,25 @@
+# -*- text -*-
+#
+#  $Id: a4ead1d64e8220344b483718ece4712bef5e9e36 $
+
+######################################################################
+#
+#  This next section is a sample configuration for the "passwd"
+#  module, that reads flat-text files.
+#
+#  The file is in the format <mac>,<ip>
+#
+#	00:01:02:03:04:05,192.0.2.100
+#	01:01:02:03:04:05,192.0.2.101
+#	02:01:02:03:04:05,192.0.2.102
+#
+#  This lets you perform simple static IP assignments from a flat-text
+#  file.  You will have to define lease times yourself.
+#
+######################################################################
+
+passwd mac2ip {
+	filename = ${modconfdir}/${.:name}/${.:instance}
+	format = "*DHCP-Client-Hardware-Address:=DHCP-Your-IP-Address"
+	delimiter = ","
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/mac2vlan b/src/test/setup/radius-config/freeradius/mods-available/mac2vlan
new file mode 100644
index 0000000..ee8e4b3
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/mac2vlan
@@ -0,0 +1,18 @@
+# -*- text -*-
+#
+#  $Id: a1db803a71cddbb98daeeeda515cff2fc77ea318 $
+
+#  A simple file to map a MAC address to a VLAN.
+#
+#  The file should be in the format MAC,VLAN.
+#  The VLAN name cannot have spaces in it, for example:
+#
+#	00:01:02:03:04:05,VLAN1
+#	03:04:05:06:07:08,VLAN2
+#	...
+#
+passwd mac2vlan {
+	filename = ${modconfdir}/${.:name}/${.:instance}
+	format = "*VMPS-Mac:=VMPS-VLAN-Name"
+	delimiter = ","
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/mschap b/src/test/setup/radius-config/freeradius/mods-available/mschap
new file mode 100644
index 0000000..f2aa631
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/mschap
@@ -0,0 +1,106 @@
+# -*- text -*-
+#
+#  $Id: 2170df13dbb884fde5d596eba68056781ba3160c $
+
+# Microsoft CHAP authentication
+#
+#  This module supports MS-CHAP and MS-CHAPv2 authentication.
+#  It also enforces the SMB-Account-Ctrl attribute.
+#
+mschap {
+	#
+	#  If you are using /etc/smbpasswd, see the 'passwd'
+	#  module for an example of how to use /etc/smbpasswd
+
+	# If use_mppe is not set to "no", mschap will
+	# add MS-CHAP-MPPE-Keys for MS-CHAPv1 and
+	# MS-MPPE-Recv-Key/MS-MPPE-Send-Key for MS-CHAPv2
+	#
+#	use_mppe = no
+
+	# if mppe is enabled require_encryption makes
+	# encryption moderate
+	#
+#	require_encryption = yes
+
+	# require_strong always requires 128 bit key
+	# encryption
+	#
+#	require_strong = yes
+
+	# The module can perform authentication itself, OR
+	# use a Windows Domain Controller.  This configuration
+	# directive tells the module to call the ntlm_auth
+	# program, which will do the authentication, and return
+	# the NT-Key.  Note that you MUST have "winbindd" and
+	# "nmbd" running on the local machine for ntlm_auth
+	# to work.  See the ntlm_auth program documentation
+	# for details.
+	#
+	# If ntlm_auth is configured below, then the mschap
+	# module will call ntlm_auth for every MS-CHAP
+	# authentication request.  If there is a cleartext
+	# or NT hashed password available, you can set
+	# "MS-CHAP-Use-NTLM-Auth := No" in the control items,
+	# and the mschap module will do the authentication itself,
+	# without calling ntlm_auth.
+	#
+	# Be VERY careful when editing the following line!
+	#
+	# You can also try setting the user name as:
+	#
+	#	... --username=%{mschap:User-Name} ...
+	#
+	# In that case, the mschap module will look at the User-Name
+	# attribute, and do prefix/suffix checks in order to obtain
+	# the "best" user name for the request.
+	#
+#	ntlm_auth = "/path/to/ntlm_auth --request-nt-key --username=%{%{Stripped-User-Name}:-%{%{User-Name}:-None}} --challenge=%{%{mschap:Challenge}:-00} --nt-response=%{%{mschap:NT-Response}:-00}"
+
+	# The default is to wait 10 seconds for ntlm_auth to
+	# complete.  This is a long time, and if it's taking that
+	# long then you likely have other problems in your domain.
+	# The length of time can be decreased with the following
+	# option, which can save clients waiting if your ntlm_auth
+	# usually finishes quicker. Range 1 to 10 seconds.
+	#
+#	ntlm_auth_timeout = 10
+
+	passchange {
+		# This supports MS-CHAPv2 (not v1) password change
+		# requests.  See doc/mschap.rst for more IMPORTANT
+		# information.
+		#
+		# Samba/ntlm_auth - if you are using ntlm_auth to
+		# validate passwords, you will need to use ntlm_auth
+		# to change passwords.  Uncomment the three lines
+		# below, and change the path to ntlm_auth.
+		#
+#		ntlm_auth = "/usr/bin/ntlm_auth --helper-protocol=ntlm-change-password-1"
+#		ntlm_auth_username = "username: %{mschap:User-Name}"
+#		ntlm_auth_domain = "nt-domain: %{mschap:NT-Domain}"
+
+		# To implement a local password change, you need to
+		# supply a string which is then expanded, so that the
+		# password can be placed somewhere.  e.g. passed to a
+		# script (exec), or written to SQL (UPDATE/INSERT).
+		# We give both examples here, but only one will be
+		# used.
+		#
+#		local_cpw = "%{exec:/path/to/script %{mschap:User-Name} %{MS-CHAP-New-Cleartext-Password}}"
+		#
+#		local_cpw = "%{sql:UPDATE radcheck set value='%{MS-CHAP-New-NT-Password}' where username='%{SQL-User-Name}' and attribute='NT-Password'}"
+	}
+
+	# For Apple Server, when running on the same machine as
+	# Open Directory.  It has no effect on other systems.
+	#
+#	use_open_directory = yes
+
+	# On failure, set (or not) the MS-CHAP error code saying
+	# "retries allowed".
+#	allow_retry = yes
+
+	# An optional retry message.
+#	retry_msg = "Re-enter (or reset) the password"
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/ntlm_auth b/src/test/setup/radius-config/freeradius/mods-available/ntlm_auth
new file mode 100644
index 0000000..9ee11aa
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/ntlm_auth
@@ -0,0 +1,12 @@
+#
+#  For testing ntlm_auth authentication with PAP.
+#
+#  If you have problems with authentication failing, even when the
+#  password is good, it may be a bug in Samba:
+#
+#	https://bugzilla.samba.org/show_bug.cgi?id=6563
+#
+exec ntlm_auth {
+	wait = yes
+	program = "/path/to/ntlm_auth --request-nt-key --domain=MYDOMAIN --username=%{mschap:User-Name} --password=%{User-Password}"
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/opendirectory b/src/test/setup/radius-config/freeradius/mods-available/opendirectory
new file mode 100644
index 0000000..10dd507
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/opendirectory
@@ -0,0 +1,13 @@
+# -*- text -*-
+#
+#  $Id: 2a44ef695f4eaf6f1c461b3d92fda54e9b910f9e $
+
+#  This module is only used when the server is running on the same
+#  system as OpenDirectory.  The configuration of the module is hard-coded
+#  by Apple, and cannot be changed here.
+#
+#  There are no configuration entries for this module.
+#
+opendirectory {
+
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/otp b/src/test/setup/radius-config/freeradius/mods-available/otp
new file mode 100644
index 0000000..03d0262
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/otp
@@ -0,0 +1,77 @@
+#
+#  Configuration for the OTP module.
+#
+
+#  This module allows you to use various handheld OTP tokens
+#  for authentication (Auth-Type := otp).  These tokens are
+#  available from various vendors.
+#
+#  It works in conjunction with otpd, which implements token
+#  management and OTP verification functions; and lsmd or gsmd,
+#  which implements synchronous state management functions.
+#  otpd, lsmd and gsmd are available from TRI-D Systems:
+#              <http://www.tri-dsystems.com/>
+
+#  You must list this module in BOTH the authorize and authenticate
+#  sections in order to use it.
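+#
+#  A minimal sketch of that listing (illustrative only; it mirrors the
+#  pattern used by the "smsotp" module elsewhere in this configuration):
+#
+#  authorize {
+#	...
+#	otp
+#  }
+#
+#  authenticate {
+#	Auth-Type otp {
+#		otp
+#	}
+#  }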
+otp {
+	# otpd rendezvous point.
+	# (default: /var/run/otpd/socket)
+	#otpd_rp = /var/run/otpd/socket
+
+	# Text to use for the challenge.
+	# Default "Challenge: %{reply:OTP-Challenge}\n Response: "
+
+	challenge_prompt = "Challenge: %{reply:OTP-Challenge} \n Response: "
+
+	# Length of the challenge.  Most tokens probably support a
+	# max of 8 digits.  (range: 5-32 digits, default 6)
+	#challenge_length = 6
+
+	# Maximum time, in seconds, that a challenge is valid.
+	# (The user must respond to a challenge within this time.)
+	# It is also the minimal time between consecutive async mode
+	# authentications, a necessary restriction due to an inherent
+	# weakness of the RADIUS protocol which allows replay attacks.
+	# (default: 30)
+	#challenge_delay = 30
+
+	# Whether or not to allow asynchronous ("pure" challenge/
+	# response) mode authentication.  Since sync mode is much more
+	# usable, and all reasonable tokens support it, the typical
+	# use of async mode is to allow re-sync of event based tokens.
+	# But because of the vulnerability of async mode with some tokens,
+	# you probably want to disable this and require that out-of-sync
+	# users re-sync from specifically secured terminals.
+	# See the otpd docs for more info.
+	# (default: no)
+	#allow_async = no
+
+	# Whether or not to allow synchronous mode authentication.
+	# When using otpd with lsmd, it is *CRITICALLY IMPORTANT*
+	# that if your OTP users can authenticate to multiple RADIUS
+	# servers, this must be "yes" for the primary/default server,
+	# and "no" for the others.  This is because lsmd does not
+	# share state information across multiple servers.  Using "yes"
+	# on all your RADIUS servers would allow replay attacks!
+	# Also, for event based tokens, the user will be out of sync
+	# on the "other" servers.  In order to use "yes" on all your
+	# servers, you must either use gsmd, which synchronises state
+	# globally, or implement your own state synchronisation method.
+	# (default: yes)
+	#allow_sync = yes
+
+	# If both allow_async and allow_sync are "yes", a challenge is
+	# always presented to the user.  This is incompatible with NAS
+	# that can't present or don't handle Access-Challenges, e.g.
+	# PPTP servers.  Even though a challenge is presented, the user
+	# can still enter their synchronous passcode.
+
+	# The following are MPPE settings.  Note that MS-CHAP (v1) is
+	# strongly discouraged.  All possible values are listed as
+	# {value = meaning}.  Default values are first.
+	#mschapv2_mppe = {2 = required, 1 = optional, 0 = forbidden}
+	#mschapv2_mppe_bits = {2 = 128, 1 = 128 or 40, 0 = 40}
+	#mschap_mppe = {2 = required, 1 = optional, 0 = forbidden}
+	#mschap_mppe_bits = {2 = 128}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/pam b/src/test/setup/radius-config/freeradius/mods-available/pam
new file mode 100644
index 0000000..a31dfda
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/pam
@@ -0,0 +1,26 @@
+# -*- text -*-
+#
+#  $Id: f4a91a948637bb2f42f613ed9faa6f9ae9ae6099 $
+
+
+# Pluggable Authentication Modules
+#
+#  For Linux, see:
+#	http://www.kernel.org/pub/linux/libs/pam/index.html
+#
+#  WARNING: On many systems, the system PAM libraries have
+#           memory leaks!  We STRONGLY SUGGEST that you do not
+#	    use PAM for authentication, due to those memory leaks.
+#
+pam {
+	#
+	#  The name to use for PAM authentication.
+	#  PAM looks in /etc/pam.d/${pam_auth_name}
+	#  for its configuration.  See 'redhat/radiusd-pam'
+	#  for a sample PAM configuration file.
+	#
+	#  Note that any Pam-Auth attribute set in the 'authorize'
+	#  section will over-ride this one.
+	#
+	pam_auth = radiusd
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/pap b/src/test/setup/radius-config/freeradius/mods-available/pap
new file mode 100644
index 0000000..1636b52
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/pap
@@ -0,0 +1,18 @@
+# -*- text -*-
+#
+#  $Id: 0038ecd154840c71ceff33ddfdd936e4e28e0bcd $
+
+# PAP module to authenticate users based on their stored password
+#
+#  Supports multiple encryption/hash schemes.  See "man rlm_pap"
+#  for details.
+#
+#  For instructions on creating the various types of passwords, see:
+#
+#  http://www.openldap.org/faq/data/cache/347.html
+pap {
+	#  By default the server will use heuristics to try and automatically
+	#  handle base64 or hex encoded passwords. This behaviour can be
+	#  stopped by setting the following to "no".
+	normalise = yes
+}
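+
+#  Illustrative sketch (an assumption, not part of this module's config):
+#  pap only verifies User-Password against a "known good" password found
+#  elsewhere, e.g. a users-file entry such as the hypothetical one below.
+#
+#	bob	Cleartext-Password := "hello"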
diff --git a/src/test/setup/radius-config/freeradius/mods-available/passwd b/src/test/setup/radius-config/freeradius/mods-available/passwd
new file mode 100644
index 0000000..bf77f3a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/passwd
@@ -0,0 +1,55 @@
+# -*- text -*-
+#
+#  $Id: 11bd2246642bf3c080327c7f4a67dc42603f3a6c $
+
+# The passwd module allows you to do authorization via any passwd-like
+# file and to extract any attributes from these files.
+#
+#  See the "smbpasswd" and "etc_group" files for more examples.
+#
+# parameters are:
+#   filename - path to file
+#
+#   format - format of a record in the file. This parameter
+#            correlates a record in the passwd file with RADIUS
+#            attributes.
+#
+#            A field marked as '*' is a key field. That is, the attribute
+#            with this name from the request is used to search for
+#            the record in the passwd file.
+#
+#            Attributes marked as '=' are added to reply_items instead
+#            of default configure_items
+#
+#	     Attributes marked as '~' are added to request_items
+#
+#            A field marked as ',' may contain a comma-separated list
+#            of attributes.
+#
+#   hash_size - hashtable size.  Setting it to 0 is no longer permitted.
+#		A future version of the server will have the module
+#		automatically determine the hash size.  Having it set
+#		manually should not be necessary.
+#
+#   allow_multiple_keys - if many records for a key are allowed
+#
+#   ignore_nislike - ignore NIS-related records
+#
+#   delimiter - symbol to use as a field separator in the passwd file;
+#            in the format string the ':' symbol is always used. '\0' and
+#	     '\n' are not allowed.
+#
+
+#  An example configuration for using /etc/passwd.
+#
+#  This is an example which will NOT WORK if you have shadow passwords,
+#  NIS, etc.  The "unix" module is normally responsible for reading
+#  system passwords.  You should use it instead of this example.
+#
+passwd etc_passwd {
+	filename = /etc/passwd
+	format = "*User-Name:Crypt-Password:"
+	hash_size = 100
+	ignore_nislike = no
+	allow_multiple_keys = no
+}
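+
+#  A further sketch showing the '=', '*' and ',' markers together, modelled
+#  on the "etc_group" example mentioned above (treat the exact format string
+#  as an assumption and check the shipped example before use):
+#
+#passwd etc_group {
+#	filename = /etc/group
+#	format = "=Group-Name:::*,User-Name"
+#	hash_size = 50
+#	ignore_nislike = yes
+#	allow_multiple_keys = yes
+#	delimiter = ":"
+#}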
diff --git a/src/test/setup/radius-config/freeradius/mods-available/perl b/src/test/setup/radius-config/freeradius/mods-available/perl
new file mode 100644
index 0000000..6936a78
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/perl
@@ -0,0 +1,83 @@
+# -*- text -*-
+#
+#  $Id: 3d9428e69c08fbc281f9830beae1cd8b7a5e5c70 $
+
+#  Persistent, embedded Perl interpreter.
+#
+perl {
+	#
+	#  The Perl script to execute on authorize, authenticate,
+	#  accounting, xlat, etc.  This is very similar to using
+	#  'rlm_exec' module, but it is persistent, and therefore
+	#  faster.
+	#
+	filename = ${modconfdir}/${.:instance}/example.pl
+
+	#
+	#  The following hashes are given to the module and
+	#  filled with value-pairs (Attribute names and values)
+	#
+	#  %RAD_CHECK		Check items
+	#  %RAD_REQUEST		Attributes from the request
+	#  %RAD_REPLY		Attributes for the reply
+	#  %RAD_REQUEST_PROXY	Attributes from the proxied request
+	#  %RAD_REQUEST_PROXY_REPLY Attributes from the proxy reply
+	#
+	#  The interface between FreeRADIUS and Perl is strings.
+	#  That is, attributes of type "octets" are converted to
+	#  printable strings, such as "0xabcdef".  If you want to
+	#  access the binary values of the attributes, you should
+	#  call the Perl "pack" function.  Then to send any binary
+	#  data back to FreeRADIUS, call the Perl "unpack" function,
+	#  so that the contents of the hashes are printable strings.
+	#
+	#  IP addresses are sent as strings, e.g. "192.0.2.25", and
+	#  not as a 4-byte binary value.  The same applies to other
+	#  attribute data types.
+	#
+	#  Attributes of type "string" are copied to Perl as-is.
+	#  They are not escaped or interpreted.
+	#
+	#  The return codes from functions in the perl_script
+	#  are passed directly back to the server.  These
+	#  codes are defined in mods-config/example.pl
+	#
+
+	# You can define configuration items (and nested sub-sections) in the perl "config" section.
+	# These items will be accessible in the perl script through %RAD_PERLCONF hash.
+	# For instance: $RAD_PERLCONF{'name'} $RAD_PERLCONF{'sub-config'}->{'name'}
+	#
+	#config {
+	#	name = "value"
+	#	sub-config {
+	#		name = "value of name from config.sub-config"
+	#	}
+	#}
+	
+	#
+	#  List of functions in the module to call.
+	#  Uncomment and change if you want to use function
+	#  names other than the defaults.
+	#
+	#func_authenticate = authenticate
+	#func_authorize = authorize
+	#func_preacct = preacct
+	#func_accounting = accounting
+	#func_checksimul = checksimul
+	#func_pre_proxy = pre_proxy
+	#func_post_proxy = post_proxy
+	#func_post_auth = post_auth
+	#func_recv_coa = recv_coa
+	#func_send_coa = send_coa
+	#func_xlat = xlat
+	#func_detach = detach
+
+	#
+	#  Uncomment the following lines if you wish
+	#  to use separate functions for Start and Stop
+	#  accounting packets. In that case, the
+	#  func_accounting function is not called.
+	#
+	#func_start_accounting = accounting_start
+	#func_stop_accounting = accounting_stop
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/preprocess b/src/test/setup/radius-config/freeradius/mods-available/preprocess
new file mode 100644
index 0000000..ae349e9
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/preprocess
@@ -0,0 +1,62 @@
+# -*- text -*-
+#
+#  $Id: 8baec7961ba75fe52546cb1331868b0b2b1c38f4 $
+
+# Preprocess the incoming RADIUS request, before handing it off
+# to other modules.
+#
+#  This module processes the 'huntgroups' and 'hints' files.
+#  In addition, it re-writes some weird attributes created
+#  by some NAS, and converts the attributes into a form which
+#  is a little more standard.
+#
+preprocess {
+	# Search for files in a subdirectory of mods-config which
+	# matches this instance of the preprocess module.
+	moddir = ${modconfdir}/${.:instance}
+
+	huntgroups = ${moddir}/huntgroups
+	hints = ${moddir}/hints
+
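+	#  Illustrative only (an assumed entry, not shipped configuration):
+	#  a huntgroups line names a huntgroup followed by the check items
+	#  that select it, e.g.
+	#
+	#	isdn	NAS-IP-Address == 192.0.2.10
+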
+	# This hack changes Ascend's weird port numbering
+	# to standard 0-??? port numbers so that the "+" works
+	# for IP address assignments.
+	with_ascend_hack = no
+	ascend_channels_per_line = 23
+
+	# Windows NT machines often authenticate themselves as
+	# NT_DOMAIN\username
+	#
+	# If this is set to 'yes', then the NT_DOMAIN portion
+	# of the user-name is silently discarded.
+	#
+	# This configuration entry SHOULD NOT be used.
+	# See the "realms" module for a better way to handle
+	# NT domains.
+	with_ntdomain_hack = no
+
+	# Specialix Jetstream 8500 24 port access server.
+	#
+	# If the user name is 10 characters or longer, a "/"
+	# and the excess characters after the 10th are
+	# appended to the user name.
+	#
+	# If you're not running that NAS, you don't need
+	# this hack.
+	with_specialix_jetstream_hack = no
+
+	# Cisco (and Quintum in Cisco mode) sends its VSA attributes
+	# with the attribute name *again* in the string, like:
+	#
+	#   H323-Attribute = "h323-attribute=value".
+	#
+	# If this configuration item is set to 'yes', then
+	# the redundant data in the attribute text is stripped
+	# out.  The result is:
+	#
+	#  H323-Attribute = "value"
+	#
+	# If you're not running a Cisco or Quintum NAS, you don't
+	# need this hack.
+	with_cisco_vsa_hack = no
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/python b/src/test/setup/radius-config/freeradius/mods-available/python
new file mode 100644
index 0000000..dcaaef2
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/python
@@ -0,0 +1,47 @@
+#
+# Make sure the PYTHONPATH environmental variable contains the
+# directory(s) for the modules listed below.
+#
+# Uncomment any func_* which are included in your module. If
+# rlm_python is called for a section which does not have
+# a function defined, it will return NOOP.
+#
+python {
+	module = example
+
+	mod_instantiate = ${.module}
+#	func_instantiate = instantiate
+
+	mod_detach = ${.module}
+#	func_detach = instantiate
+
+	mod_authorize = ${.module}
+#	func_authorize = authorize
+
+	mod_authenticate = ${.module}
+#	func_authenticate = authenticate
+
+	mod_preacct = ${.module}
+#	func_preacct = preacct
+
+	mod_accounting = ${.module}
+#	func_accounting = accounting
+
+	mod_checksimul = ${.module}
+#	func_checksimul = checksimul
+
+	mod_pre_proxy = ${.module}
+#	func_pre_proxy = pre_proxy
+
+	mod_post_proxy = ${.module}
+#	func_post_proxy = post_proxy
+
+	mod_post_auth = ${.module}
+#	func_post_auth = post_auth
+
+	mod_recv_coa = ${.module}
+#	func_recv_coa = recv_coa
+
+	mod_send_coa = ${.module}
+#	func_send_coa = send_coa
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/radutmp b/src/test/setup/radius-config/freeradius/mods-available/radutmp
new file mode 100644
index 0000000..8430fc1
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/radutmp
@@ -0,0 +1,53 @@
+# -*- text -*-
+#
+#  $Id: 82319c033bbf349991a46b8f198a5bf5487b5da8 $
+
+#  Write a 'utmp' style file, of which users are currently
+#  logged in, and where they've logged in from.
+#
+#  This file is used mainly for Simultaneous-Use checking,
+#  and also 'radwho', to see who's currently logged in.
+#
+radutmp {
+	#  Where the file is stored.  It's not a log file,
+	#  so it doesn't need rotating.
+	#
+	filename = ${logdir}/radutmp
+
+	#  The field in the packet to key on for the
+	#  'user' name.  If you have other fields which you want
+	#  to use to key on to control Simultaneous-Use,
+	#  then you can use them here.
+	#
+	#  Note, however, that the size of the field in the
+	#  'utmp' data structure is small, around 32
+	#  characters, so that will limit the possible choices
+	#  of keys.
+	#
+	#  You may want instead: %{%{Stripped-User-Name}:-%{User-Name}}
+	username = %{User-Name}
+
+
+	#  Whether or not we want to treat "user" the same
+	#  as "USER", or "User".  Some systems have problems
+	#  with case sensitivity, so this should be set to
+	#  'no' to make the comparison of the key attribute
+	#  case insensitive.
+	#
+	case_sensitive = yes
+
+	#  Accounting information may be lost, so the user MAY
+	#  have logged off of the NAS, but we haven't noticed.
+	#  If so, we can verify this information with the NAS,
+	#
+	#  If we want to believe the 'utmp' file, then this
+	#  configuration entry can be set to 'no'.
+	#
+	check_with_nas = yes
+
+	# Set the file permissions, as the contents of this file
+	# are usually private.
+	permissions = 0600
+
+	caller_id = "yes"
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/realm b/src/test/setup/radius-config/freeradius/mods-available/realm
new file mode 100644
index 0000000..c1984d0
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/realm
@@ -0,0 +1,46 @@
+# -*- text -*-
+#
+#  $Id: 0b87548aac96952f1fa07410578e482496cb75e8 $
+
+# Realm module, for proxying.
+#
+#  You can have multiple instances of the realm module to
+#  support multiple realm syntaxes at the same time.  The
+#  search order is defined by the order that the modules are listed
+#  in the authorize and preacct sections.
+#
+#  Two config options:
+#	format	 -  must be "prefix" or "suffix"
+#			  The special cases of "DEFAULT"
+#			  and "NULL" are allowed, too.
+#	delimiter      -  must be a single character
+
+#  'realm/username'
+#
+#  Using this entry, IPASS users have their realm set to "IPASS".
+realm IPASS {
+	format = prefix
+	delimiter = "/"
+}
+
+#  'username@realm'
+#
+realm suffix {
+	format = suffix
+	delimiter = "@"
+}
+
+#  'username%realm'
+#
+realm realmpercent {
+	format = suffix
+	delimiter = "%"
+}
+
+#
+#  'domain\user'
+#
+realm ntdomain {
+	format = prefix
+	delimiter = "\\"
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/redis b/src/test/setup/radius-config/freeradius/mods-available/redis
new file mode 100644
index 0000000..a47a046
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/redis
@@ -0,0 +1,55 @@
+# -*- text -*-
+#
+#  $Id: 8750f989839fdcebfe106ef6574e8c96f93cdefa $
+
+#
+#  Configuration file for the "redis" module.  This module does nothing
+#  other than provide connections to a redis database, and a %{redis: ...}
+#  expansion.
+#
+redis {
+	#  Host where the redis server is located.
+	#  We recommend using ONLY 127.0.0.1 !
+	server = 127.0.0.1
+
+	#  The default port.
+	port = 6379
+
+	#  The password used to authenticate to the server.
+	#  We recommend using a strong password.
+#	password = thisisreallysecretandhardtoguess
+
+	#
+	#  Information for the connection pool.  The configuration items
+	#  below are the same for all modules which use the new
+	#  connection pool.
+	#
+	pool {
+		# start this many connections
+		start = 1
+
+		# Keep at least "min" connections open
+		min = 1
+
+		# No more than "max" connections at any one time
+		max = 10
+
+		# try to keep "spare" connections
+		spare = 0
+
+		# The pool is checked for free connections every
+		# "cleanup_interval".  If there are free connections,
+		# then one of them is closed.
+		cleanup_interval = 300
+
+		# connections last no more than "lifetime" seconds.
+		lifetime = 86400
+
+		# close idle connections after "idle_timeout" seconds
+		idle_timeout = 600
+
+		# allow no more than "uses" queries through a connection.
+		# after that, close it and open a new one.
+		uses = 0
+	}
+}
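+
+#  Illustrative sketch of the %{redis: ...} expansion mentioned above
+#  (the key layout "welcome:<user>" is a hypothetical example):
+#
+#	update reply {
+#		Reply-Message := "%{redis:GET welcome:%{User-Name}}"
+#	}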
diff --git a/src/test/setup/radius-config/freeradius/mods-available/rediswho b/src/test/setup/radius-config/freeradius/mods-available/rediswho
new file mode 100644
index 0000000..0471d26
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/rediswho
@@ -0,0 +1,46 @@
+# -*- text -*-
+#
+#  $Id: dba8c583f08db3490f74127d680c3d7ce5d1c572 $
+
+#
+#  Configuration file for the "rediswho" module.
+#
+#  This module tracks the last set of login sessions for a user.
+#
+rediswho {
+	#  How many sessions to keep track of per user.
+	#  If there are more than this number, older sessions are deleted.
+	trim_count = 15
+
+	#  Expiry time in seconds.  Any sessions which have not received
+	#  an update in this time will be automatically expired.
+	expire_time = 86400
+
+	#
+	#  Each subsection contains insert / trim / expire queries.
+	#  The subsections are named after the contents of the
+	#  Acct-Status-Type attribute.  See dictionary.rfc2866 for names
+	#  of the various Acct-Status-Type values, or look at the output
+	#  of debug mode.
+	#
+	#  This module supports *any* Acct-Status-Type.  Just add a subsection
+	#  of the appropriate name, along with insert / trim / expire queries.
+	#
+	Start {
+		insert = "LPUSH %{User-Name} %l,%{Acct-Session-Id},%{NAS-IP-Address},%{Acct-Session-Time},%{Framed-IP-Address},%{%{Acct-Input-Gigawords}:-0},%{%{Acct-Output-Gigawords}:-0},%{%{Acct-Input-Octets}:-0},%{%{Acct-Output-Octets}:-0}"
+		trim =   "LTRIM %{User-Name} 0 ${..trim_count}"
+		expire = "EXPIRE %{User-Name} ${..expire_time}"
+	}
+
+	Interim-Update {
+		insert = "LPUSH %{User-Name} %l,%{Acct-Session-Id},%{NAS-IP-Address},%{Acct-Session-Time},%{Framed-IP-Address},%{%{Acct-Input-Gigawords}:-0},%{%{Acct-Output-Gigawords}:-0},%{%{Acct-Input-Octets}:-0},%{%{Acct-Output-Octets}:-0}"
+		trim =   "LTRIM %{User-Name} 0 ${..trim_count}"
+		expire = "EXPIRE %{User-Name} ${..expire_time}"
+	}
+
+	Stop {
+		insert = "LPUSH %{User-Name} %l,%{Acct-Session-Id},%{NAS-IP-Address},%{Acct-Session-Time},%{Framed-IP-Address},%{%{Acct-Input-Gigawords}:-0},%{%{Acct-Output-Gigawords}:-0},%{%{Acct-Input-Octets}:-0},%{%{Acct-Output-Octets}:-0}"
+		trim =   "LTRIM %{User-Name} 0 ${..trim_count}"
+		expire = "EXPIRE %{User-Name} ${..expire_time}"
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/replicate b/src/test/setup/radius-config/freeradius/mods-available/replicate
new file mode 100644
index 0000000..6df4523
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/replicate
@@ -0,0 +1,40 @@
+#  Replicate packet(s) to a home server.
+#
+#  This module will open a new socket for each packet, and "clone"
+#  the incoming packet to the destination realm (i.e. home server).
+#
+#  Use it by setting "Replicate-To-Realm = name" in the control list,
+#  just like Proxy-To-Realm.  The configurations for the two attributes
+#  are identical.  The realm must exist, the home_server_pool must exist,
+#  and the home_server must exist.
+#
+#  The only difference is that the "replicate" module sends requests
+#  and does not expect a reply.  Any reply is ignored.
+#
+#  Both Replicate-To-Realm and Proxy-To-Realm can be used at the same time.
+#
+#  To use this module, list "replicate" in the "authorize" or
+#  "accounting" section.  Then, ensure that Replicate-To-Realm is set.
+#  The contents of the "packet" attribute list will be sent to the
+#  home server.  The usual load-balancing, etc. features of the home
+#  server will be used.
+#
+#  "radmin" can be used to mark home servers alive/dead, in order to
+#  enable/disable replication to specific servers.
+#
+#  Packets can be replicated to multiple destinations.  Just set
+#  Replicate-To-Realm multiple times.  One packet will be sent for
+#  each of the Replicate-To-Realm attributes in the "control" list.
+#
+#  If no packets are sent, the module returns "noop".  If at least one
+#  packet is sent, the module returns "ok".  If an error occurs, the
+#  module returns "fail".
+#
+#  Note that replication does NOT change any of the packet statistics.
+#  If you use "radmin" to look at the statistics for a home server,
+#  the replicated packets will cause NO counters to increment.  This
+#  is not a bug, this is how replication works.
+#
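+#  A minimal sketch of triggering replication from unlang (the realm name
+#  "copy-acct" is a hypothetical example; the realm itself must exist, as
+#  noted above):
+#
+#	update control {
+#		Replicate-To-Realm := "copy-acct"
+#	}
+#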
+replicate {
+
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/rest b/src/test/setup/radius-config/freeradius/mods-available/rest
new file mode 100644
index 0000000..19b9de6
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/rest
@@ -0,0 +1,132 @@
+rest {
+	#
+	#  This subsection configures the tls related items
+	#  that control how FreeRADIUS connects to an HTTPS
+	#  server.
+	#
+	tls {
+#		ca_file	= ${certdir}/cacert.pem
+#		ca_path	= ${certdir}
+
+#		certificate_file	= /path/to/radius.crt
+#		private_key_file	= /path/to/radius.key
+#		private_key_password	= "supersecret"
+#		random_file		= ${certdir}/random
+
+		#  Server certificate verification requirements.  Can be:
+		#    "no"  (don't even bother trying)
+		#    "yes" (verify the cert was issued by one of the
+		#	   trusted CAs)
+		#
+		#  The default is "yes"
+#		check_cert     = "yes"
+
+		#  Server certificate CN verification requirements.  Can be:
+		#    "no"  (don't even bother trying)
+		#    "yes" (verify the CN in the certificate matches the host
+		#	   in the URI)
+		#
+		#  The default is "yes"
+#		check_cert_cn  = "yes"
+	}
+
+	# rlm_rest will open a connection to the server specified in connect_uri
+	# to populate the connection cache, ready for the first request.
+	# The server will not start if the server specified is unreachable.
+	#
+	# If you wish to disable this pre-caching and reachability check,
+	# comment out the configuration item below.
+	connect_uri = "http://127.0.0.1/"
+
+	#
+	#  The following config items can be used in each of the sections.
+	#  The sections themselves reflect the sections in the server.
+	#  For example if you list rest in the authorize section of a virtual server,
+	#  the settings from the authorize section here will be used.
+	#
+	#  The following config items may be listed in any of the sections:
+	#    uri          - to send the request to.
+	#    method       - HTTP method to use, one of 'get', 'post', 'put', 'delete'.
+	#    body         - The format of the HTTP body sent to the remote server.
+	#                   May be 'none', 'post' or 'json', defaults to 'none'.
+	#    tls          - TLS settings for HTTPS.
+	#    auth         - HTTP auth method to use, one of 'none', 'srp', 'basic',
+	#                   'digest', 'digest-ie', 'gss-negotiate', 'ntlm',
+	#                   'ntlm-winbind', 'any', 'safe'. defaults to 'none'.
+	#    username     - User to authenticate as, will be expanded.
+	#    password     - Password to use for authentication, will be expanded.
+	#    require_auth - Require HTTP authentication.
+	#    timeout      - HTTP request timeout in seconds, defaults to 4.
+	#
+	authorize {
+		uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?section=authorize"
+		method = "get"
+		tls = ${..tls}
+	}
+	authenticate {
+		uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?section=authenticate"
+		method = "get"
+		tls = ${..tls}
+	}
+	accounting {
+		uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?section=accounting"
+		method = "post"
+		tls = ${..tls}
+	}
+	session {
+		uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?section=checksimul"
+		method = "post"
+		tls = ${..tls}
+	}
+	post-auth {
+		uri = "${..connect_uri}/user/%{User-Name}/mac/%{Called-Station-ID}?section=post-auth"
+		method = "post"
+		tls = ${..tls}
+	}
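+
+	#  To invoke the sections above, just list "rest" in the matching
+	#  section of a virtual server, e.g. (illustrative):
+	#
+	#	authorize {
+	#		...
+	#		rest
+	#	}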
+
+	#
+	#  The connection pool is new for 3.0, and will be used in many
+	#  modules, for all kinds of connection-related activity.
+	#
+	pool {
+		# Number of connections to start
+		start = 5
+
+		# Minimum number of connections to keep open
+		min = 4
+
+		# Maximum number of connections
+		#
+		# If these connections are all in use and a new one
+		# is requested, the request will NOT get a connection.
+		max = 10
+
+		# Spare connections to be left idle
+		#
+		# NOTE: Idle connections WILL be closed if "idle_timeout"
+		# is set.
+		spare = 3
+
+		# Number of uses before the connection is closed
+		#
+		# 0 means "infinite"
+		uses = 0
+
+		# The lifetime (in seconds) of the connection
+		lifetime = 0
+
+		# idle timeout (in seconds).  A connection which is
+		# unused for this length of time will be closed.
+		idle_timeout = 60
+
+		# NOTE: All configuration settings are enforced.  If a
+		# connection is closed because of "idle_timeout",
+		# "uses", or "lifetime", then the total number of
+		# connections MAY fall below "min".  When that
+		# happens, it will open a new connection.  It will
+		# also log a WARNING message.
+		#
+		# The solution is to either lower the "min" connections,
+		# or increase lifetime/idle_timeout.
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/smbpasswd b/src/test/setup/radius-config/freeradius/mods-available/smbpasswd
new file mode 100644
index 0000000..de400ee
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/smbpasswd
@@ -0,0 +1,16 @@
+# -*- text -*-
+#
+#  $Id: d5ad2a06c767f07722dc9b9c4b13d00c26b5a280 $
+
+#  An example configuration for using /etc/smbpasswd.
+#
+#  See the "passwd" file for documentation on the configuration items
+#  for this module.
+#
+passwd smbpasswd {
+	filename = /etc/smbpasswd
+	format = "*User-Name::LM-Password:NT-Password:SMB-Account-CTRL-TEXT::"
+	hash_size = 100
+	ignore_nislike = no
+	allow_multiple_keys = no
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/smsotp b/src/test/setup/radius-config/freeradius/mods-available/smsotp
new file mode 100644
index 0000000..876931c
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/smsotp
@@ -0,0 +1,94 @@
+# -*- text -*-
+#
+#  $Id: 3be32b85f56a84725fe1a6bf508e459dbe6c4e02 $
+
+#  SMS One-time Password system.
+#
+#  This module will extend FreeRADIUS with a socket interface to create and
+#  validate One-Time-Passwords. The program that creates the socket
+#  and interacts with this module is not included here.
+#
+#  The module does not check the User-Password; this should be done with
+#  the "pap" module.  See the example below.
+#
+#  The module must be used in the "authorize" section to set
+#  Auth-Type properly.  The first time through, the module is called
+#  in the "authenticate" section to authenticate the user password, and
+#  to send the challenge.  The second time through, it authenticates
+#  the response to the challenge. e.g.:
+#
+#  authorize {
+#	...
+#	smsotp
+#	...
+#  }
+#
+#  authenticate {
+#	...
+#	Auth-Type smsotp {
+#		pap
+#		smsotp
+#	}
+#
+#	Auth-Type smsotp-reply {
+#		smsotp
+#	}
+#	...
+#  }
+#
+smsotp {
+	#  The location of the socket.
+	socket = "/var/run/smsotp_socket"
+
+	#  Defines the challenge message that will be sent to the
+	#  NAS. Default is "Enter Mobile PIN".
+	challenge_message = "Enter Mobile PIN:"
+
+	#  Defines the Auth-Type section that is run for the response to
+	#  the challenge. Default is "smsotp-reply".
+	challenge_type = "smsotp-reply"
+
+	#  Control how many sockets are used to talk to the SMSOTPd
+	#
+	pool {
+		# Number of connections to start
+		start = 5
+
+		# Minimum number of connections to keep open
+		min = 4
+
+		# Maximum number of connections
+		#
+		# If these connections are all in use and a new one
+		# is requested, the request will NOT get a connection.
+		max = 10
+
+		# Spare connections to be left idle
+		#
+		# NOTE: Idle connections WILL be closed if "idle_timeout"
+		# is set.
+		spare = 3
+
+		# Number of uses before the connection is closed
+		#
+		# 0 means "infinite"
+		uses = 0
+
+		# The lifetime (in seconds) of the connection
+		lifetime = 0
+
+		# idle timeout (in seconds).  A connection which is
+		# unused for this length of time will be closed.
+		idle_timeout = 60
+
+		# NOTE: All configuration settings are enforced.  If a
+		# connection is closed because of "idle_timeout",
+		# "uses", or "lifetime", then the total number of
+		# connections MAY fall below "min".  When that
+		# happens, it will open a new connection.  It will
+		# also log a WARNING message.
+		#
+		# The solution is to either lower the "min" connections,
+		# or increase lifetime/idle_timeout.
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/soh b/src/test/setup/radius-config/freeradius/mods-available/soh
new file mode 100644
index 0000000..d125ce4
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/soh
@@ -0,0 +1,4 @@
+# SoH module
+soh {
+	dhcp = yes
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/sometimes b/src/test/setup/radius-config/freeradius/mods-available/sometimes
new file mode 100644
index 0000000..094426d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/sometimes
@@ -0,0 +1,12 @@
+# -*- text -*-
+#
+#  $Id: 3a96622cc938f558b023e1110769a46861716a12 $
+
+#
+# The "sometimes" module is here for debugging purposes. Each instance
+# randomly returns the configured result, or "noop".
+#
+# It is based on the "always" module.
+sometimes {
+	rcode = fail
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/sql b/src/test/setup/radius-config/freeradius/mods-available/sql
new file mode 100644
index 0000000..f4c92d5
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/sql
@@ -0,0 +1,971 @@
+# -*- text -*-
+##
+## sql.conf -- SQL modules
+##
+##	$Id: e1431d634a28f20a0e5deaeedd66a161deb88eb7 $
+
+######################################################################
+#
+#  Configuration for the SQL module
+#
+#  The database schemas and queries are located in subdirectories:
+#
+#	sql/<DB>/main/schema.sql	Schema
+#	sql/<DB>/main/queries.conf	Authorisation and Accounting queries
+#
+#  Where "DB" is mysql, mssql, oracle, or postgresql.
+#
+#
+
+sql {
+	# The sub-module to use to execute queries. This should match
+	# the database you're attempting to connect to.
+	#
+	#    * rlm_sql_mysql
+	#    * rlm_sql_mssql
+	#    * rlm_sql_oracle
+	#    * rlm_sql_postgresql
+	#    * rlm_sql_sqlite
+	#    * rlm_sql_null (log queries to disk)
+	#
+	driver = "rlm_sql_sqlite"
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+ 
+	sqlite {
+		filename = "/opt/db/radius.sqlite3"
+	}
+
+#
+#	Several drivers accept specific options.  To set them, add a
+#	config section with the same name as the driver to the sql
+#	instance.
+#
+#	Driver specific options are:
+#
+#	sqlite {
+#		# Path to the sqlite database
+#		filename = "/my/sqlite/database.db"
+#
+#		# If the file above does not exist and bootstrap is set
+#		# a new database file will be created, and the SQL statements
+#		# contained within the file will be executed.
+#		bootstrap = "/my/sqlite/schema.sql"
+# 	}
+#
+#	mysql {
+#		# If any of the below files are set, TLS encryption is enabled
+#		tls {
+#			ca_file = "/etc/ssl/certs/my_ca.crt"
+#			ca_path = "/etc/ssl/certs/"
+#			certificate_file = "/etc/ssl/certs/private/client.crt"
+#			private_key_file = "/etc/ssl/certs/private/client.key"
+#			cipher = "DHE-RSA-AES256-SHA:AES128-SHA"
+#		}
+#	}
+#
+
+	# The dialect of SQL you want to use.  This should usually match
+	# the driver you selected above.
+	#
+	# If you're using rlm_sql_null, then it should be the type of
+	# database the logged queries are going to be executed against.
+	dialect = "sqlite"
+
+	# Connection info:
+	#
+#	server = "localhost"
+#	port = 3306
+#	login = "radius"
+#	password = "radpass"
+
+	# Database table configuration for everything except Oracle
+	radius_db = "radius"
+
+	# If you are using Oracle then use this instead
+#	radius_db = "(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=localhost)(PORT=1521))(CONNECT_DATA=(SID=your_sid)))"
+
+	# If you're using PostgreSQL, this can also be used instead of the connection info parameters
+#	radius_db = "dbname=radius host=localhost user=radius password=raddpass"
+
+	# If you want both stop and start records logged to the
+	# same SQL table, leave this as is.  If you want them in
+	# different tables, put the start table in acct_table1
+	# and stop table in acct_table2
+	acct_table1 = "radacct"
+	acct_table2 = "radacct"
+
+	# Allow for storing data after authentication
+	postauth_table = "radpostauth"
+
+	# Tables containing 'check' items
+	authcheck_table = "radcheck"
+	groupcheck_table = "radgroupcheck"
+
+	# Tables containing 'reply' items
+	authreply_table = "radreply"
+	groupreply_table = "radgroupreply"
+
+	# Table to keep group info
+	usergroup_table = "radusergroup"
+
+	# If set to 'yes' (default) we read the group tables
+	# If set to 'no' the user MUST have Fall-Through = Yes in the radreply table
+	# read_groups = yes
+
+	# Remove stale session if checkrad does not see a double login
+	delete_stale_sessions = yes
+
+	# Write SQL queries to a logfile. This is potentially useful for tracing
+	# issues with authorization queries.
+#	logfile = ${logdir}/sqllog.sql
+
+	#  As of version 3.0, the "pool" section has replaced the
+	#  following configuration items:
+	#
+	#  num_sql_socks
+	#  connect_failure_retry_delay
+	#  lifetime
+	#  max_queries
+
+	#
+	#  The connection pool is new for 3.0, and will be used in many
+	#  modules, for all kinds of connection-related activity.
+	#
+	# When the server is not threaded, the connection pool
+	# limits are ignored, and only one connection is used.
+	#
+	pool {
+		# Number of connections to start
+		start = 5
+
+		# Minimum number of connections to keep open
+		min = 4
+
+		# Maximum number of connections
+		#
+		# If these connections are all in use and a new one
+		# is requested, the request will NOT get a connection.
+		#
+		# Setting 'max' to LESS than the number of threads means
+		# that some threads may starve, and you will see errors
+		# like "No connections available and at max connection limit"
+		#
+		# Setting 'max' to MORE than the number of threads means
+		# that there are more connections than necessary.
+		#
+		max = ${thread[pool].max_servers}
+
+		# Spare connections to be left idle
+		#
+		# NOTE: Idle connections WILL be closed if "idle_timeout"
+		# is set.
+		spare = 3
+
+		# Number of uses before the connection is closed
+		#
+		# 0 means "infinite"
+		uses = 0
+
+		# The lifetime (in seconds) of the connection
+		lifetime = 0
+
+		# idle timeout (in seconds).  A connection which is
+		# unused for this length of time will be closed.
+		idle_timeout = 60
+
+		# NOTE: All configuration settings are enforced.  If a
+		# connection is closed because of "idle_timeout",
+		# "uses", or "lifetime", then the total number of
+		# connections MAY fall below "min".  When that
+		# happens, it will open a new connection.  It will
+		# also log a WARNING message.
+		#
+		# The solution is to either lower the "min" connections,
+		# or increase lifetime/idle_timeout.
+	}
+
+	# Set to 'yes' to read radius clients from the database ('nas' table)
+	# Clients will ONLY be read on server startup.
+#	read_clients = yes
+
+	# Table to keep radius client info
+	client_table = "nas"
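+
+	#  A hedged illustration (the values below are placeholders): a
+	#  row in the 'nas' table plays the same role as a clients.conf
+	#  entry such as:
+	#
+	#	client 192.0.2.10 {
+	#		secret    = testing123
+	#		shortname = test-nas
+	#	}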
+
+	# Read database-specific queries
+	$INCLUDE ${modconfdir}/${.:name}/main/${dialect}/queries.conf
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/sqlcounter b/src/test/setup/radius-config/freeradius/mods-available/sqlcounter
new file mode 100644
index 0000000..89d6d40
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/sqlcounter
@@ -0,0 +1,95 @@
+#  Rather than maintaining separate (GDBM) databases of
+#  accounting info for each counter, this module uses the data
+#  stored in the radacct table by the sql modules. This
+#  module NEVER does any database INSERTs or UPDATEs.  It is
+#  totally dependent on the SQL module to process Accounting
+#  packets.
+#
+#  The 'sql_module_instance' parameter holds the instance of the sql
+#  module to use when querying the SQL database. Normally it
+#  is just "sql".  If you define more than one SQL module
+#  instance (usually for failover situations), you can
+#  specify which module has access to the Accounting Data
+#  (radacct table).
+#
+#  The 'reset' parameter defines when the counters are all
+#  reset to zero.  It can be hourly, daily, weekly, monthly or
+#  never.  It can also be user defined. It should be of the
+#  form:
+#  	num[hdwm] where:
+#  	h: hours, d: days, w: weeks, m: months
+#  	If the letter is omitted, days will be assumed. For example:
+#  	reset = 10h (reset every 10 hours)
+#  	reset = 12  (reset every 12 days)
+#
+#  The 'key' parameter specifies the unique identifier for the
+#  counter records (usually 'User-Name').
+#
+#  The 'query' parameter specifies the SQL query used to get
+#  the current Counter value from the database. There are two
+#  parameters that can be used in the query:
+#		%b	unix time value of beginning of reset period
+#		%e	unix time value of end of reset period
+#
+#  The 'check_name' parameter is the name of the 'check'
+#  attribute to use to access the counter in the 'users' file
+#  or SQL radcheck or radcheckgroup tables.
+#
+#  DEFAULT  Max-Daily-Session > 3600, Auth-Type = Reject
+#      Reply-Message = "You've used up more than one hour today"
+#
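+#  A minimal wiring sketch (hedged; it assumes this file is linked into
+#  mods-enabled and that the "sql" module is configured): list the
+#  counter in the "authorize" section of the virtual server, and give
+#  the user a Max-Daily-Session check item, e.g. in the users file:
+#
+#	authorize {
+#		...
+#		dailycounter
+#	}
+#
+#	bob	Cleartext-Password := "hello", Max-Daily-Session := 3600
+#		Reply-Message = "You are limited to one hour per day"
+#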
+sqlcounter dailycounter {
+	sql_module_instance = sql
+	dialect = ${modules.sql.dialect}
+
+	counter_name = Daily-Session-Time
+	check_name = Max-Daily-Session
+	reply_name = Session-Timeout
+
+	key = User-Name
+	reset = daily
+
+	$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
+}
+
+sqlcounter monthlycounter {
+	sql_module_instance = sql
+	dialect = ${modules.sql.dialect}
+
+	counter_name = Monthly-Session-Time
+	check_name = Max-Monthly-Session
+	reply_name = Session-Timeout
+	key = User-Name
+	reset = monthly
+
+	$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
+}
+
+sqlcounter noresetcounter {
+	sql_module_instance = sql
+	dialect = ${modules.sql.dialect}
+
+	counter_name = Max-All-Session-Time
+	check_name = Max-All-Session
+	key = User-Name
+	reset = never
+
+	$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
+}
+
+#
+#  Set an account to expire T seconds after first login.
+#  Requires the Expire-After attribute to be set, in seconds.
+#  You may need to edit raddb/dictionary to add the Expire-After
+#  attribute.
+sqlcounter expire_on_login {
+	sql_module_instance = sql
+	dialect = ${modules.sql.dialect}
+
+	counter_name = Expire-After-Initial-Login
+	check_name = Expire-After
+	key = User-Name
+	reset = never
+
+	$INCLUDE ${modconfdir}/sql/counter/${dialect}/${.:instance}.conf
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/sqlippool b/src/test/setup/radius-config/freeradius/mods-available/sqlippool
new file mode 100644
index 0000000..269c072
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/sqlippool
@@ -0,0 +1,65 @@
+#  Configuration for the SQL based IP Pool module (rlm_sqlippool)
+#
+#  The database schemas are available at:
+#
+#       raddb/sql/ippool/<DB>/schema.sql
+#
+#  $Id: 26960222182b4656ed895e365a4ca4659d87e2a9 $
+
+sqlippool {
+	# SQL instance to use (from sql.conf)
+	#
+	#  If you have multiple sql instances, such as "sql sql1 {...}",
+	#  use the *instance* name here: sql1.
+	sql_module_instance = "sql"
+
+	#  This is duplicative of info available in the SQL module, but
+	#  we have to list it here as we do not yet support nested
+	#  reference expansions.
+	dialect = "mysql"
+
+	# SQL table to use for ippool range and lease info
+	ippool_table = "radippool"
+
+	# IP lease duration. (Leases expire even if Acct Stop packet is lost)
+	lease_duration = 3600
+
+	# Attribute which should be considered unique per NAS
+	#
+	#  Using NAS-Port gives behaviour similar to rlm_ippool. (And ACS)
+	#  Using Calling-Station-Id works for NASes that send a fixed NAS-Port
+	#  ONLY change this if you know what you are doing!
+	pool_key = "%{NAS-Port}"
+	# pool_key = "%{Calling-Station-Id}"
+
+	################################################################
+	#
+	#  WARNING: MySQL (MyISAM) has certain limitations that mean it can
+	#           hand out the same IP address to two different users.
+	#
+	#           We suggest using an SQL DB with proper transaction
+	#           support, such as PostgreSQL, or using MySQL
+	#	     with InnoDB.
+	#
+	################################################################
+
+	#  These messages are added to the "control" items, as
+	#  Module-Success-Message.  They are not logged anywhere else,
+	#  unlike previous versions.  If you want to have them logged
+	#  to a file, see the "linelog" module, and create an entry
+	#  which writes Module-Success-Message message.
+	#
+	messages {
+		exists = "Existing IP: %{reply:Framed-IP-Address} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
+
+		success = "Allocated IP: %{reply:Framed-IP-Address} from %{control:Pool-Name} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
+
+		clear = "Released IP %{Framed-IP-Address} (did %{Called-Station-Id} cli %{Calling-Station-Id} user %{User-Name})"
+
+		failed = "IP Allocation FAILED from %{control:Pool-Name} (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
+
+		nopool = "No Pool-Name defined (did %{Called-Station-Id} cli %{Calling-Station-Id} port %{NAS-Port} user %{User-Name})"
+	}
+
+	$INCLUDE ${modconfdir}/sql/ippool/${dialect}/queries.conf
+}
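+
+#  A hedged usage sketch (the pool name below is only illustrative, and
+#  it assumes the radippool table has been created from the schema
+#  shipped with the server): allocate an address in "post-auth" and
+#  release it in "accounting", selecting the pool with a
+#  control:Pool-Name check item:
+#
+#	post-auth {
+#		update control {
+#			Pool-Name := "local_pool"
+#		}
+#		sqlippool
+#	}
+#
+#	accounting {
+#		sqlippool
+#	}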
diff --git a/src/test/setup/radius-config/freeradius/mods-available/sradutmp b/src/test/setup/radius-config/freeradius/mods-available/sradutmp
new file mode 100644
index 0000000..8e28704
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/sradutmp
@@ -0,0 +1,16 @@
+# -*- text -*-
+#
+#  $Id: 3a2a0e502e76ec00d4ec17e70132448e1547da46 $
+
+# "Safe" radutmp - does not contain caller ID, so it can be
+# world-readable, and radwho can work for normal users, without
+# exposing any information that isn't already exposed by who(1).
+#
+# This is another 'instance' of the radutmp module, but it is given
+# the name "sradutmp" to identify it later in the "accounting"
+# section.
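+#
+# A short usage sketch (assuming this file is linked into mods-enabled):
+# reference the instance name in the "accounting" section of the
+# virtual server:
+#
+#	accounting {
+#		sradutmp
+#	}
+#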
+radutmp sradutmp {
+	filename = ${logdir}/sradutmp
+	permissions = 0644
+	caller_id = "no"
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/unbound b/src/test/setup/radius-config/freeradius/mods-available/unbound
new file mode 100644
index 0000000..9fd9b1f
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/unbound
@@ -0,0 +1,4 @@
+unbound dns {
+	# filename = "${raddbdir}/mods-config/unbound/default.conf"
+	# timeout = 3000
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/unix b/src/test/setup/radius-config/freeradius/mods-available/unix
new file mode 100644
index 0000000..a5798d5
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/unix
@@ -0,0 +1,25 @@
+# -*- text -*-
+#
+#  $Id: 5165139aaf39d533581161871542b48a6e3e8c42 $
+
+# Unix /etc/passwd style authentication
+#
+#  This module calls the system functions to get the "known good"
+#  password.  This password is usually in the "crypt" form, and is
+#  incompatible with CHAP, MS-CHAP, PEAP, etc.
+#
+#  If passwords are in /etc/shadow, you will need to set the "group"
+#  configuration in radiusd.conf.  Look for "shadow", and follow the
+#  instructions there.
+#
+unix {
+	#
+	#  The location of the "wtmp" file.
+	#  The only use for 'radlast'.  If you don't use
+	#  'radlast', then you can comment out this item.
+	#
+	#  Note that the radwtmp file may get large!  You should
+	#  rotate it (cp /dev/null radwtmp), or just not use it.
+	#
+	radwtmp = ${logdir}/radwtmp
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/unpack b/src/test/setup/radius-config/freeradius/mods-available/unpack
new file mode 100644
index 0000000..6e42ad1
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/unpack
@@ -0,0 +1,42 @@
+# -*- text -*-
+#
+#  $Id: 2a1e130d315daa247167372773c1994e3200f332 $
+
+#
+#  This module is useful only for 'xlat'.  To use it,
+#  add it to the raddb/mods-enabled/ directory.  Then,
+#  use it on the right-hand side of a variable assignment.
+#
+#  ... = "%{unpack:data 1 integer}"
+#
+#  The arguments are three fields:
+#
+#	data
+#		Either &Attribute-Name
+#		the name of the attribute to unpack.
+#		MUST be a "string" or "octets" type.
+#
+#		or 0xabcdef
+#		e.g. hex data.
+#
+#	1
+#		The offset into the string from which
+#		it starts unpacking.  The offset starts
+#		at zero, for the first attribute.
+#
+#	integer
+#		the data type to unpack at that offset.
+#		e.g. integer, ipaddr, byte, short, etc.
+#
+#  e.g. if we have Class = 0x00000001020304, then
+#
+#	%{unpack:&Class 4 short}
+#
+#  will unpack octets 4 and 5 as a "short", which has
+#  value 0x0304.
+#
+#  This module is used when vendors put multiple fields
+#  into one attribute of type "octets".
+#
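+#  A small usage sketch (the destination attribute below is only
+#  illustrative): expand the module in unlang, for example in the
+#  "post-auth" section:
+#
+#	update request {
+#		&Tmp-Integer-0 := "%{unpack:&Class 4 short}"
+#	}
+#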
+unpack {
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/utf8 b/src/test/setup/radius-config/freeradius/mods-available/utf8
new file mode 100644
index 0000000..00812fa
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/utf8
@@ -0,0 +1,14 @@
+#
+#  Enforces UTF-8 on strings coming in from the NAS.
+#
+#  An attribute of type "string" containing UTF-8 makes
+#  the module return NOOP.
+#
+#  An attribute of type "string" containing non-UTF-8 data
+#  makes the module return FAIL.
+#
+#  This module takes no configuration.
+#
+utf8 {
+
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/wimax b/src/test/setup/radius-config/freeradius/mods-available/wimax
new file mode 100644
index 0000000..c2aa42f
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/wimax
@@ -0,0 +1,112 @@
+#
+#	The WiMAX module currently takes no configuration.
+#
+#	It should be listed in the "authorize" and "preacct" sections.
+#	This enables the module to fix the horrible binary version
+#	of Calling-Station-Id to the normal format, as specified in
+#	RFC 3580, Section 3.21.
+#
+#	In order to calculate the various WiMAX keys, the module should
+#	be listed in the "post-auth" section.  If EAP authentication
+#	has been used, AND the EAP method derives MSK and EMSK, then
+#	the various WiMAX keys can be calculated.
+#
+#	Some useful things to remember:
+#
+#	WiMAX-MSK = EAP MSK, but is 64 octets.
+#
+#	MIP-RK-1 = HMAC-SHA256(EMSK, "miprk@wimaxforum.org" | 0x00020001)
+#	MIP-RK-2 = HMAC-SHA256(EMSK, MIP-RK-1 | "miprk@wimaxforum.org" | 0x00020002)
+#	MIP-RK = MIP-RK-1 | MIP-RK-2
+#
+#	MIP-SPI = first 4 octets of HMAC-SHA256(MIP-RK, "SPI CMIP PMIP")
+#		plus some magic... you've got to track *all* MIP-SPI's
+#		on your system!
+#
+#	SPI-CMIP4 = MIP-SPI
+#	SPI-PMIP4 = MIP-SPI + 1
+#	SPI-CMIP6 = MIP-SPI + 2
+#
+#	MN-NAI is the Mobile node NAI.  You have to create it, and put
+#	it into the request or reply as something like:
+#
+#		WiMAX-MN-NAI = "%{User-Name}"
+#
+#	You will also have to have the appropriate IP address (v4 or v6)
+#	in order to calculate the keys below.
+#
+#	Lifetimes are derived from Session-Timeout.  It needs to be set
+#	to some useful number.
+#
+#	The hash function below H() is HMAC-SHA1.
+#
+#
+#	MN-HA-CMIP4 = H(MIP-RK, "CMIP4 MN HA" | HA-IPv4 | MN-NAI)
+#
+#		Where HA-IPv4 is	WiMAX-hHA-IP-MIP4
+#		or maybe		WiMAX-vHA-IP-MIP4
+#
+#		Which goes into		WiMAX-MN-hHA-MIP4-Key
+#		or maybe		WiMAX-RRQ-MN-HA-Key
+#		or maybe even		WiMAX-vHA-MIP4-Key
+#
+#	The corresponding SPI is SPI-CMIP4, which is MIP-SPI,
+#
+#		which goes into		WiMAX-MN-hHA-MIP4-SPI
+#		or maybe		WiMAX-RRQ-MN-HA-SPI
+#		or even			WiMAX-MN-vHA-MIP4-SPI
+#
+#	MN-HA-PMIP4 = H(MIP-RK, "PMIP4 MN HA" | HA-IPv4 | MN-NAI)
+#	MN-HA-CMIP6 = H(MIP-RK, "CMIP6 MN HA" | HA-IPv6 | MN-NAI)
+#
+#		both with similar comments to above for MN-HA-CMIP4.
+#
+#	In order to tell which one to use (CMIP4, PMIP4, or CMIP6),
+#	you have to set WiMAX-IP-Technology in the reply to one of
+#	the appropriate values.
+#
+#
+#	FA-RK = H(MIP-RK, "FA-RK")
+#
+#	MN-FA = H(FA-RK, "MN FA" | FA-IP | MN-NAI)
+#
+#		Where does the FA-IP come from?  No idea...
+#
+#
+#	The next two keys (HA-RK and FA-HA) are not generated
+#	for every authentication request, but only on demand.
+#
+#	HA-RK = 160-bit random number assigned by the AAA server
+#		to a specific HA.
+#
+#	FA-HA = H(HA-RK, "FA-HA" | HA-IPv4 | FA-CoAv4 | SPI)
+#
+#		where HA-IPv4 is as above.
+#		and FA-CoAv4 is the address of the FA as seen by the HA
+#		and SPI is the relevant SPI for the HA-RK.
+#
+#	DHCP-RK = 160-bit random number assigned by the AAA server
+#		  to a specific DHCP server.  vDHCP-RK is the same
+#		  thing.
+#
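+#	A hedged sketch of typical post-auth glue, using only attribute
+#	names already described above (adjust to your deployment):
+#
+#	post-auth {
+#		update reply {
+#			&WiMAX-MN-NAI = "%{User-Name}"
+#		}
+#		wimax
+#	}
+#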
+wimax {
+	#
+	#  Some WiMAX equipment requires that the MS-MPPE-*-Key
+	#  attributes are sent in the Access-Accept, in addition to
+	#  the WiMAX-MSK attribute.
+	#
+	#  Other WiMAX equipment requests that the MS-MPPE-*-Key
+	#  attributes are NOT sent in the Access-Accept.
+	#
+	#  By default, the EAP module sends MS-MPPE-*-Key attributes.
+	#  The default virtual server (raddb/sites-available/default)
+	#  contains examples of adding the WiMAX-MSK.
+	#
+	#  This configuration option makes the WiMAX module delete
+	#  the MS-MPPE-*-Key attributes.  The default is to leave
+	#  them in place.
+	#
+	#  If the keys are deleted (by setting this to "yes"), then
+	#  the WiMAX-MSK attribute is automatically added to the reply.
+	delete_mppe_keys = no
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-available/yubikey b/src/test/setup/radius-config/freeradius/mods-available/yubikey
new file mode 100644
index 0000000..d21c136
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-available/yubikey
@@ -0,0 +1,141 @@
+#
+#  This module decrypts and validates Yubikey static and dynamic
+#  OTP tokens.
+#
+yubikey {
+	#
+	#  The length (number of ASCII bytes) of the Public-ID portion
+	#  of the OTP string.
+	#
+	#  Yubikey defaults to a 6 byte ID (2 * 6 = 12)
+#	id_length = 12
+
+	#
+	#  If true, the authorize method of rlm_yubikey will attempt to split the
+	#  value of User-Password into the user's password and the OTP token.
+	#
+	#  If enabled and successful, the value of User-Password will be truncated
+	#  and request:Yubikey-OTP will be added.
+	#
+#	split = yes
+
+	#
+	#  Decrypt mode - Tokens will be decrypted and processed locally
+	#
+	#  The module itself does not provide persistent storage as this
+	#  would be duplicative of functionality already in the server.
+	#
+	#  Yubikey authentication needs two control attributes
+	#  retrieved from persistent storage:
+	#    * Yubikey-Key     - The AES key used to decrypt the OTP data.
+	#                        The Yubikey-Public-Id and/or User-Name
+	#                        attributes may be used to retrieve the key.
+	#    * Yubikey-Counter - This is compared with the counter in the OTP
+	#                        data and used to prevent replay attacks.
+	#                        This attribute will also be available in
+	#                        the request list after successful
+	#                        decryption.
+	#
+	#  Yubikey-Counter isn't strictly required, but the server will
+	#  generate warnings if it's not present when yubikey.authenticate
+	#  is called.
+	#
+	#  These attributes are available after authorization:
+	#    * Yubikey-Public-ID  - The public portion of the OTP string
+	#
+	#  These attributes are available after authentication (if successful):
+	#    * Yubikey-Private-ID - The encrypted ID included in OTP data,
+	#                           must be verified if tokens share keys.
+	#    * Yubikey-Counter    - The last counter value (should be recorded).
+	#    * Yubikey-Timestamp  - Token's internal clock (mainly useful for debugging).
+	#    * Yubikey-Random     - Randomly generated value from the token.
+	#
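+	#  A hedged sketch for decrypt mode (the key below is a dummy
+	#  placeholder): supply the per-token AES key as a control
+	#  attribute, e.g. from the users file:
+	#
+	#	bob	Cleartext-Password := "hello", Yubikey-Key := 0x000102030405060708090a0b0c0d0e0f
+	#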
+	decrypt = no
+
+	#
+	#  Validation mode - Tokens will be validated against a Yubicloud server
+	#
+	validate = no
+
+	#
+	#  Settings for validation mode.
+	#
+	validation {
+		#
+		#  URL of validation server, multiple URL config items may be used
+		#  to list multiple servers.
+		#
+		# - %d is a placeholder for the public ID of the token
+		# - %s is a placeholder for the token string itself
+		#
+		#  If no URLs are listed, the module will default to the URLs
+		#  built into the ykclient library, which point to the Yubico
+		#  validation servers.
+		servers {
+#			uri = 'http://api.yubico.com/wsapi/2.0/verify?id=%d&otp=%s'
+#			uri = 'http://api2.yubico.com/wsapi/2.0/verify?id=%d&otp=%s'
+		}
+
+		#
+		#  API Client ID
+		#
+		#  Must be set to your client id for the validation server.
+		#
+#		client_id = 00000
+
+		#
+		#  API Secret key (Base64 encoded)
+		#
+		#  Must be set to your API key for the validation server.
+		#
+#		api_key = '000000000000000000000000'
+
+		#
+		#  Connection pool parameters
+		#
+		pool {
+			# Number of connections to start
+			start = 5
+
+			# Minimum number of connections to keep open
+			min = 4
+
+			# Maximum number of connections
+			#
+			# If these connections are all in use and a new one
+			# is requested, the request will NOT get a connection.
+			max = 10
+
+			# Spare connections to be left idle
+			#
+			# NOTE: Idle connections WILL be closed if "idle_timeout"
+			# is set.
+			spare = 3
+
+			# Number of uses before the connection is closed
+			#
+			# 0 means "infinite"
+			uses = 0
+
+			# The lifetime (in seconds) of the connection
+			lifetime = 0
+
+			# idle timeout (in seconds).  A connection which is
+			# unused for this length of time will be closed.
+			idle_timeout = 60
+
+			# Cycle over all connections in a pool instead of concentrating
+			# connection use on a few connections.
+			spread = yes
+
+			# NOTE: All configuration settings are enforced.  If a
+			# connection is closed because of "idle_timeout",
+			# "uses", or "lifetime", then the total number of
+			# connections MAY fall below "min".  When that
+			# happens, it will open a new connection.  It will
+			# also log a WARNING message.
+			#
+			# The solution is to either lower the "min" connections,
+			# or increase lifetime/idle_timeout.
+		}
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/README.rst b/src/test/setup/radius-config/freeradius/mods-config/README.rst
new file mode 100644
index 0000000..abb4c8d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/README.rst
@@ -0,0 +1,22 @@
+The mods-config Directory
+=========================
+
+This directory contains module-specific configuration files.  These
+files are in a format different from the one used by the main
+`radiusd.conf` files.  Earlier versions of the server had many
+module-specific files in the main `raddb` directory.  The directory
+contained many files, and it was not clear which files did what.
+
+For Version 3 of FreeRADIUS, we have moved to a consistent naming
+scheme.  Each module-specific configuration file is placed in this
+directory, in a subdirectory named for the module.  Where necessary,
+files in the subdirectory have been named for the processing section
+where they are used.
+
+For example, the `users` file is now located in
+`mods-config/files/authorize`.  That filename tells us three things:
+
+1. The file is used in the `authorize` section.
+2. The file is used by the `files` module.
+3. It is a "module configuration" file, which is a specific format.
+
diff --git a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/access_challenge b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/access_challenge
new file mode 100644
index 0000000..528670c
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/access_challenge
@@ -0,0 +1,19 @@
+#
+#	Configuration file for the rlm_attr_filter module.
+#	Please see rlm_attr_filter(5) manpage for more information.
+#
+#	$Id: 12ed619cf16f7322221ef2dfaf28f9c36c616e3c $
+#
+#	This configuration file is used to remove almost all of the
+#	attributes From an Access-Challenge message.  The RFCs say
+#	attributes from an Access-Challenge message.  The RFCs say
+#	attributes.  We enforce that here.
+#
+DEFAULT
+	EAP-Message =* ANY,
+	State =* ANY,
+	Message-Authenticator =* ANY,
+	Reply-Message =* ANY,
+	Proxy-State =* ANY,
+	Session-Timeout =* ANY,
+	Idle-Timeout =* ANY
diff --git a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/access_reject b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/access_reject
new file mode 100644
index 0000000..e5a122b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/access_reject
@@ -0,0 +1,17 @@
+#
+#	Configuration file for the rlm_attr_filter module.
+#	Please see rlm_attr_filter(5) manpage for more information.
+#
+#	$Id: 251f79c9b50d317aec0b31d2c4ff2208ef596509 $
+#
+#	This configuration file is used to remove almost all of the attributes
+#	from an Access-Reject message.  The RFCs say that an Access-Reject
+#	packet can contain only a few attributes.  We enforce that here.
+#
+DEFAULT
+	EAP-Message =* ANY,
+	State =* ANY,
+	Message-Authenticator =* ANY,
+	Reply-Message =* ANY,
+	MS-CHAP-Error =* ANY,
+	Proxy-State =* ANY
diff --git a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/accounting_response b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/accounting_response
new file mode 100644
index 0000000..eb72eec
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/accounting_response
@@ -0,0 +1,15 @@
+#
+#	Configuration file for the rlm_attr_filter module.
+#	Please see rlm_attr_filter(5) manpage for more information.
+#
+#	$Id: 3746ce4da3d58fcdd0b777a93e599045353c27ac $
+#
+#	This configuration file is used to remove almost all of the attributes
+#	from an Accounting-Response message.  The RFCs say that an
+#	Accounting-Response packet can contain only a few attributes.
+#	We enforce that here.
+#
+DEFAULT
+	Vendor-Specific =* ANY,
+	Message-Authenticator =* ANY,
+	Proxy-State =* ANY
diff --git a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/post-proxy b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/post-proxy
new file mode 100644
index 0000000..555ee48
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/post-proxy
@@ -0,0 +1,129 @@
+#
+#	Configuration file for the rlm_attr_filter module.
+#	Please see rlm_attr_filter(5) manpage for more information.
+#
+#	$Id: 21a3af9c7ad97563b372d445bee2b37d564448fe $
+#
+#	This file contains security and configuration information
+#	for each realm. The first field is the realm name and
+#	can be up to 253 characters in length. This is followed (on
+#	the next line) by the list of filter rules to be used to
+#	decide what attributes and/or values we allow proxy servers
+#	to pass to the NAS for this realm.
+#
+#	When a proxy-reply packet is received from a home server,
+#	these attributes and values are tested. Only the first match
+#	is used unless the "Fall-Through" variable is set to "Yes".
+#	In that case the rules defined in the DEFAULT case are
+#	processed as well.
+#
+#	A special realm named "DEFAULT" matches on all realm names.
+#	You can have only one DEFAULT entry. All entries are processed
+#	in the order they appear in this file. The first entry that
+#	matches the login-request will stop processing unless you use
+#	the Fall-Through variable.
+#
+#	Indented (with the tab character) lines following the first
+#	line indicate the filter rules.
+#
+#	You can include another `attrs' file with `$INCLUDE attrs.other'
+#
+
+#
+# This is a complete entry for realm "fisp". Note that there is no
+# Fall-Through entry so that no DEFAULT entry will be used, and the
+# server will NOT allow any other a/v pairs other than the ones
+# listed here.
+#
+# These rules allow:
+#     o  Only Framed-User Service-Types ( no telnet, rlogin, tcp-clear )
+#     o  PPP sessions ( no SLIP, CSLIP, etc. )
+#     o  dynamic ip assignment ( can't assign a static ip )
+#     o  an idle timeout value set to 600 seconds (10 min) or less
+#     o  a max session time set to 28800 seconds (8 hours) or less
+#
+#fisp
+#	Service-Type == Framed-User,
+#	Framed-Protocol == PPP,
+#	Framed-IP-Address == 255.255.255.254,
+#	Idle-Timeout <= 600,
+#	Session-Timeout <= 28800
+
+#
+# This is a complete entry for realm "tisp". Note that there is no
+# Fall-Through entry so that no DEFAULT entry will be used, and the
+# server will NOT allow any other a/v pairs other than the ones
+# listed here.
+#
+# These rules allow:
+#       o Only Login-User Service-Type ( no framed/ppp sessions )
+#       o Telnet sessions only ( no rlogin, tcp-clear )
+#       o Login hosts of either 192.0.2.1 or 192.0.2.2
+#
+#tisp
+#	Service-Type == Login-User,
+#	Login-Service == Telnet,
+#	Login-TCP-Port == 23,
+#	Login-IP-Host == 192.0.2.1,
+#	Login-IP-Host == 192.0.2.2
+
+#
+# The following example can be used for a home server which is only
+# allowed to supply a Reply-Message, a Session-Timeout attribute of
+# at most 86400, an Idle-Timeout attribute of at most 600, and an
+# Acct-Interim-Interval attribute between 300 and 3600.
+# All other attributes sent back will be filtered out.
+#
+#strictrealm
+#	Reply-Message =* ANY,
+#	Session-Timeout <= 86400,
+#	Idle-Timeout <= 600,
+#	Acct-Interim-Interval >= 300,
+#	Acct-Interim-Interval <= 3600
+
+#
+# This is a complete entry for realm "spamrealm". Fall-Through is used,
+# so that the DEFAULT filter rules are used in addition to these.
+#
+# These rules allow:
+#       o Force a Framed-Filter-Id attribute to be returned
+#         in the proxy reply, whether or not the home server sent one.
+#       o The standard DEFAULT rules as defined below
+#
+#spamrealm
+#	Framed-Filter-Id := "nosmtp.in",
+#	Fall-Through = Yes
+
+#
+# The rest of this file contains the DEFAULT entry.
+# DEFAULT matches with all realm names. (except if the realm previously
+# matched an entry with no Fall-Through)
+#
+
+DEFAULT
+	Service-Type == Framed-User,
+	Service-Type == Login-User,
+	Login-Service == Telnet,
+	Login-Service == Rlogin,
+	Login-Service == TCP-Clear,
+	Login-TCP-Port <= 65536,
+	Framed-IP-Address == 255.255.255.254,
+	Framed-IP-Netmask == 255.255.255.255,
+	Framed-Protocol == PPP,
+	Framed-Protocol == SLIP,
+	Framed-Compression == Van-Jacobson-TCP-IP,
+	Framed-MTU >= 576,
+	Framed-Filter-ID =* ANY,
+	Reply-Message =* ANY,
+	Proxy-State =* ANY,
+	EAP-Message =* ANY,
+	Message-Authenticator =* ANY,
+	MS-MPPE-Recv-Key =* ANY,
+	MS-MPPE-Send-Key =* ANY,
+	MS-CHAP-MPPE-Keys =* ANY,
+	State =* ANY,
+	Session-Timeout <= 28800,
+	Idle-Timeout <= 600,
+	Calling-Station-Id =* ANY,
+	Operator-Name =* ANY,
+	Port-Limit <= 2
diff --git a/src/test/setup/radius-config/freeradius/mods-config/attr_filter/pre-proxy b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/pre-proxy
new file mode 100644
index 0000000..786a341
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/attr_filter/pre-proxy
@@ -0,0 +1,62 @@
+#
+#	Configuration file for the rlm_attr_filter module.
+#	Please see rlm_attr_filter(5) manpage for more information.
+#
+#	$Id: 8c601cf205f9d85b75c1ec7fc8e816e7341a5ba4 $
+#
+#	This file contains security and configuration information
+#	for each realm. It can be used by an rlm_attr_filter module
+#	instance to filter attributes before sending packets to the
+#	home server of a realm.
+#
+#	When a packet is sent to a home server, these attributes
+#	and values are tested. Only the first match is used unless
+#	the "Fall-Through" variable is set to "Yes". In that case
+#	the rules defined in the DEFAULT case are processed as well.
+#
+#	A special realm named "DEFAULT" matches on all realm names.
+#	You can have only one DEFAULT entry. All entries are processed
+#	in the order they appear in this file. The first entry that
+#	matches the login-request will stop processing unless you use
+#	the Fall-Through variable.
+#
+#	The first line indicates the realm to which the rules apply.
+#	Indented (with the tab character) lines following the first
+#	line indicate the filter rules.
+#
+
+# This is a complete entry for the 'nochap' realm. It allows only very basic
+# attributes to be sent to the home server. Note that there is no Fall-Through
+# entry so that no DEFAULT entry will be used. Only the listed attributes
+# will be sent in the packet, all other attributes will be filtered out.
+#
+#nochap
+#	User-Name =* ANY,
+#	User-Password =* ANY,
+#	NAS-IP-Address =* ANY,
+#	NAS-Identifier =* ANY
+
+# The entry for the 'brokenas' realm removes the attribute NAS-Port-Type
+# if its value is different from 'Ethernet'. Then the default rules are
+# applied.
+#
+#brokenas
+#	NAS-Port-Type == Ethernet
+#	Fall-Through = Yes
+
+# The rest of this file contains the DEFAULT entry.
+# DEFAULT matches with all realm names.
+
+DEFAULT
+	User-Name =* ANY,
+	User-Password =* ANY,
+	CHAP-Password =* ANY,
+	CHAP-Challenge =* ANY,
+	MS-CHAP-Challenge =* ANY,
+	MS-CHAP-Response =* ANY,
+	EAP-Message =* ANY,
+	Message-Authenticator =* ANY,
+	State =* ANY,
+	NAS-IP-Address =* ANY,
+	NAS-Identifier =* ANY,
+	Proxy-State =* ANY
diff --git a/src/test/setup/radius-config/freeradius/mods-config/files/accounting b/src/test/setup/radius-config/freeradius/mods-config/files/accounting
new file mode 100644
index 0000000..552b274
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/files/accounting
@@ -0,0 +1,23 @@
+#
+#	$Id: 322d33a01f26e3990ba19954b7847e6993ae389b $
+#
+#	This is like the 'users' file, but it is processed only for
+#	accounting packets.
+#
+
+#  Select between different accounting methods based, for example, on the
+#  Realm, the Huntgroup-Name, or any combination of the attribute/value
+#  pairs contained in an accounting packet.
+#
+#DEFAULT Realm == "foo.net", Acct-Type := sql_log.foo
+#
+#DEFAULT Huntgroup-Name == "wifi", Acct-Type := sql_log.wifi
+#
+#DEFAULT Client-IP-Address == 10.0.0.1, Acct-Type := sql_log.other
+#
+#DEFAULT Acct-Status-Type == Start, Acct-Type := sql_log.start
+
+#  Replace the User-Name with the Stripped-User-Name, if it exists.
+#
+#DEFAULT
+#	User-Name := "%{%{Stripped-User-Name}:-%{User-Name}}"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/files/authorize b/src/test/setup/radius-config/freeradius/mods-config/files/authorize
new file mode 100644
index 0000000..3528563
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/files/authorize
@@ -0,0 +1,218 @@
+#
+# 	Configuration file for the rlm_files module.
+# 	Please see rlm_files(5) manpage for more information.
+#
+# 	This file contains authentication security and configuration
+#	information for each user.  Accounting requests are NOT processed
+#	through this file.  Instead, see 'accounting', in this directory.
+#
+#	The first field is the user's name and can be up to
+#	253 characters in length.  This is followed (on the same line) with
+#	the list of authentication requirements for that user.  This can
+#	include password, comm server name, comm server port number, protocol
+#	type (perhaps set by the "hints" file), and huntgroup name (set by
+#	the "huntgroups" file).
+#
+#	If you are not sure why a particular reply is being sent by the
+#	server, then run the server in debugging mode (radiusd -X), and
+#	you will see which entries in this file are matched.
+#
+#	When an authentication request is received from the comm server,
+#	these values are tested. Only the first match is used unless the
+#	"Fall-Through" variable is set to "Yes".
+#
+#	A special user named "DEFAULT" matches on all usernames.
+#	You can have several DEFAULT entries. All entries are processed
+#	in the order they appear in this file. The first entry that
+#	matches the login-request will stop processing unless you use
+#	the Fall-Through variable.
+#
+#	Indented (with the tab character) lines following the first
+#	line indicate the configuration values to be passed back to
+#	the comm server to allow the initiation of a user session.
+#	This can include things like the PPP configuration values
+#	or the host to log the user onto.
+#
+#	You can include another `users' file with `$INCLUDE users.other'
+
+#
+#	For a list of RADIUS attributes, and links to their definitions,
+#	see: http://www.freeradius.org/rfc/attributes.html
+#
+#	Entries below this point are examples included in the server for
+#	educational purposes. They may be deleted from the deployed
+#	configuration without impacting the operation of the server.
+#
+
+#
+# Deny access for a specific user.  Note that this entry MUST
+# be before any other 'Auth-Type' attribute which results in the user
+# being authenticated.
+#
+# Note that there is NO 'Fall-Through' attribute, so the user will not
+# be given any additional resources.
+#
+#lameuser	Auth-Type := Reject
+#		Reply-Message = "Your account has been disabled."
+
+#
+# Deny access for a group of users.
+#
+# Note that there is NO 'Fall-Through' attribute, so the user will not
+# be given any additional resources.
+#
+#DEFAULT	Group == "disabled", Auth-Type := Reject
+#		Reply-Message = "Your account has been disabled."
+#
+
+#
+# This is a complete entry for "steve". Note that there is no Fall-Through
+# entry so that no DEFAULT entry will be used, and the user will NOT
+# get any attributes in addition to the ones listed here.
+#
+#steve	Cleartext-Password := "testing"
+#	Service-Type = Framed-User,
+#	Framed-Protocol = PPP,
+#	Framed-IP-Address = 172.16.3.33,
+#	Framed-IP-Netmask = 255.255.255.0,
+#	Framed-Routing = Broadcast-Listen,
+#	Framed-Filter-Id = "std.ppp",
+#	Framed-MTU = 1500,
+#	Framed-Compression = Van-Jacobson-TCP-IP
+
+#
+# The canonical testing user which is in most of the
+# examples.
+#
+#bob	Cleartext-Password := "hello"
+#	Reply-Message := "Hello, %{User-Name}"
+#
+
+test	Cleartext-Password := "test"
+	Reply-Message := "Hello, %{User-Name}"
+
+raduser	Cleartext-Password := "radpass"
+	Reply-Message := "Hello, %{User-Name}"
+
+#
+# This is an entry for a user with a space in their name.
+# Note the double quotes surrounding the name.  If you have
+# users with spaces in their names, you must also change
+# the "filter_username" policy to allow spaces.
+#
+# See raddb/policy.d/filter, filter_username {} section.
+#
+#"John Doe"	Cleartext-Password := "hello"
+#		Reply-Message = "Hello, %{User-Name}"
+
+#
+# Dial user back and telnet to the default host for that port
+#
+#Deg	Cleartext-Password := "ge55ged"
+#	Service-Type = Callback-Login-User,
+#	Login-IP-Host = 0.0.0.0,
+#	Callback-Number = "9,5551212",
+#	Login-Service = Telnet,
+#	Login-TCP-Port = Telnet
+
+#
+# Another complete entry. After the user "dialbk" has logged in, the
+# connection will be broken and the user will be dialed back after which
+# he will get a connection to the host "timeshare1".
+#
+#dialbk	Cleartext-Password := "callme"
+#	Service-Type = Callback-Login-User,
+#	Login-IP-Host = timeshare1,
+#	Login-Service = PortMaster,
+#	Callback-Number = "9,1-800-555-1212"
+
+#
+# user "swilson" will only get a static IP number if he logs in with
+# a framed protocol on a terminal server in Alphen (see the huntgroups file).
+#
+# Note that by setting "Fall-Through", other attributes will be added from
+# the following DEFAULT entries
+#
+#swilson	Service-Type == Framed-User, Huntgroup-Name == "alphen"
+#		Framed-IP-Address = 192.0.2.65,
+#		Fall-Through = Yes
+
+#
+# If the user logs in as 'username.shell', then authenticate them
+# using the default method, give them shell access, and stop processing
+# the rest of the file.
+#
+#DEFAULT	Suffix == ".shell"
+#		Service-Type = Login-User,
+#		Login-Service = Telnet,
+#		Login-IP-Host = your.shell.machine
+
+
+#
+# The rest of this file contains the several DEFAULT entries.
+# DEFAULT entries match with all login names.
+# Note that DEFAULT entries can also Fall-Through (see first entry).
+# A name-value pair from a DEFAULT entry will _NEVER_ override
+# an already existing name-value pair.
+#
+
+#
+# Set up different IP address pools for the terminal servers.
+# Note that the "+" behind the IP address means that this is the "base"
+# IP address. The Port-Id (S0, S1 etc) will be added to it.
+#
+#DEFAULT	Service-Type == Framed-User, Huntgroup-Name == "alphen"
+#		Framed-IP-Address = 192.0.2.32+,
+#		Fall-Through = Yes
+
+#DEFAULT	Service-Type == Framed-User, Huntgroup-Name == "delft"
+#		Framed-IP-Address = 198.51.100.32+,
+#		Fall-Through = Yes
+
+#
+# Sample defaults for all framed connections.
+#
+#DEFAULT	Service-Type == Framed-User
+#	Framed-IP-Address = 255.255.255.254,
+#	Framed-MTU = 576,
+#	Service-Type = Framed-User,
+#	Fall-Through = Yes
+
+#
+# Default for PPP: dynamic IP address, PPP mode, VJ-compression.
+# NOTE: we do not use Hint = "PPP", since PPP might also be auto-detected
+#	by the terminal server in which case there may not be a "P" suffix.
+#	The terminal server sends "Framed-Protocol = PPP" for auto PPP.
+#
+DEFAULT	Framed-Protocol == PPP
+	Framed-Protocol = PPP,
+	Framed-Compression = Van-Jacobson-TCP-IP
+
+#
+# Default for CSLIP: dynamic IP address, SLIP mode, VJ-compression.
+#
+DEFAULT	Hint == "CSLIP"
+	Framed-Protocol = SLIP,
+	Framed-Compression = Van-Jacobson-TCP-IP
+
+#
+# Default for SLIP: dynamic IP address, SLIP mode.
+#
+DEFAULT	Hint == "SLIP"
+	Framed-Protocol = SLIP
+
+#
+# Last default: rlogin to our main server.
+#
+#DEFAULT
+#	Service-Type = Login-User,
+#	Login-Service = Rlogin,
+#	Login-IP-Host = shellbox.ispdomain.com
+
+# #
+# # Last default: shell on the local terminal server.
+# #
+# DEFAULT
+# 	Service-Type = Administrative-User
+
+# On no match, the user is denied access.
diff --git a/src/test/setup/radius-config/freeradius/mods-config/files/pre-proxy b/src/test/setup/radius-config/freeradius/mods-config/files/pre-proxy
new file mode 100644
index 0000000..9c848fd
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/files/pre-proxy
@@ -0,0 +1,31 @@
+#
+#  Configuration file for the rlm_files module.
+#  Please see rlm_files(5) manpage for more information.
+#
+#  $Id: 7292e23ea51717ee5cb50c4b9b609e91ebe4a41c $
+#
+#  This file is similar to the "users" file.  The check items
+#  are compared against the request, but the "reply" items are
+#  used to update the proxied packet, not the reply to the NAS.
+#
+#  You can use this file to re-write requests which are about to
+#  be sent to a home server.
+#
+
+#
+#  Requests destined for realm "extisp" are sent to a RADIUS
+#  home server hosted by another company, which doesn't know
+#  the IP addresses of our NASes. Therefore we replace the value of
+#  the NAS-IP-Address attribute with a unique value we have communicated
+#  to them.
+#
+#DEFAULT Realm == "extisp"
+#	NAS-IP-Address := 10.1.2.3
+
+#
+#  For all proxied packets, set the User-Name in the proxied packet
+#  to the Stripped-User-Name, if it exists.  If not, set it to the
+#  User-Name from the original request.
+#
+#DEFAULT
+#	User-Name := `%{%{Stripped-User-Name}:-%{User-Name}}`
diff --git a/src/test/setup/radius-config/freeradius/mods-config/perl/example.pl b/src/test/setup/radius-config/freeradius/mods-config/perl/example.pl
new file mode 100644
index 0000000..ac95aca
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/perl/example.pl
@@ -0,0 +1,206 @@
+
+#
+#  This program is free software; you can redistribute it and/or modify
+#  it under the terms of the GNU General Public License as published by
+#  the Free Software Foundation; either version 2 of the License, or
+#  (at your option) any later version.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#  GNU General Public License for more details.
+#
+#  You should have received a copy of the GNU General Public License
+#  along with this program; if not, write to the Free Software
+#  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
+#
+#  Copyright 2002  The FreeRADIUS server project
+#  Copyright 2002  Boian Jordanov <bjordanov@orbitel.bg>
+#
+
+#
+# Example code for use with rlm_perl
+#
+# You can use every module that comes with your perl distribution!
+#
+# If you are using DBI and do some queries to DB, please be sure to
+# use the CLONE function to initialize the DBI connection to DB.
+#
+
+use strict;
+use warnings;
+
+# use ...
+use Data::Dumper;
+
+# Bring the global hashes into the package scope
+our (%RAD_REQUEST, %RAD_REPLY, %RAD_CHECK);
+
+# This hash holds the original request from RADIUS
+#my %RAD_REQUEST;
+# In this hash you add values that will be returned to the NAS.
+#my %RAD_REPLY;
+# This is for check items
+#my %RAD_CHECK;
+# These are configuration items from the "config" section of the perl module configuration
+#my %RAD_PERLCONF;
+
+#
+# This is the remapping of return values
+#
+use constant {
+	RLM_MODULE_REJECT   => 0, # immediately reject the request
+	RLM_MODULE_OK       => 2, # the module is OK, continue
+	RLM_MODULE_HANDLED  => 3, # the module handled the request, so stop
+	RLM_MODULE_INVALID  => 4, # the module considers the request invalid
+	RLM_MODULE_USERLOCK => 5, # reject the request (user is locked out)
+	RLM_MODULE_NOTFOUND => 6, # user not found
+	RLM_MODULE_NOOP     => 7, # module succeeded without doing anything
+	RLM_MODULE_UPDATED  => 8, # OK (pairs modified)
+	RLM_MODULE_NUMCODES => 9  # How many return codes there are
+};
+
+# Same as src/include/radiusd.h
+use constant	L_DBG=>   1;
+use constant	L_AUTH=>  2;
+use constant	L_INFO=>  3;
+use constant	L_ERR=>   4;
+use constant	L_PROXY=> 5;
+use constant	L_ACCT=>  6;
+
+#  Global variables can persist across different calls to the module.
+#
+#
+#	{
+#	 my %static_global_hash = ();
+#
+#		sub post_auth {
+#		...
+#		}
+#		...
+#	}
+
+
+# Function to handle authorize
+sub authorize {
+	# For debugging purposes only
+#	&log_request_attributes;
+
+	# Here's where your authorization code comes
+	# You can call another function from here:
+	&test_call;
+
+	return RLM_MODULE_OK;
+}
+
+# Function to handle authenticate
+sub authenticate {
+	# For debugging purposes only
+#	&log_request_attributes;
+
+	if ($RAD_REQUEST{'User-Name'} =~ /^baduser/i) {
+		# Reject user and tell him why
+		$RAD_REPLY{'Reply-Message'} = "Denied access by rlm_perl function";
+		return RLM_MODULE_REJECT;
+	} else {
+		# Accept user and set some attribute
+		$RAD_REPLY{'h323-credit-amount'} = "100";
+		return RLM_MODULE_OK;
+	}
+}
+
+# Function to handle preacct
+sub preacct {
+	# For debugging purposes only
+#	&log_request_attributes;
+
+	return RLM_MODULE_OK;
+}
+
+# Function to handle accounting
+sub accounting {
+	# For debugging purposes only
+#	&log_request_attributes;
+
+	# You can call another subroutine from here
+	&test_call;
+
+	return RLM_MODULE_OK;
+}
+
+# Function to handle checksimul
+sub checksimul {
+	# For debugging purposes only
+#	&log_request_attributes;
+
+	return RLM_MODULE_OK;
+}
+
+# Function to handle pre_proxy
+sub pre_proxy {
+	# For debugging purposes only
+#	&log_request_attributes;
+
+	return RLM_MODULE_OK;
+}
+
+# Function to handle post_proxy
+sub post_proxy {
+	# For debugging purposes only
+#	&log_request_attributes;
+
+	return RLM_MODULE_OK;
+}
+
+# Function to handle post_auth
+sub post_auth {
+	# For debugging purposes only
+#	&log_request_attributes;
+
+	return RLM_MODULE_OK;
+}
+
+# Function to handle xlat
+sub xlat {
+	# For debugging purposes only
+#	&log_request_attributes;
+
+	# Load some external perl code and evaluate it
+	my ($filename,$a,$b,$c,$d) = @_;
+	&radiusd::radlog(L_DBG, "From xlat $filename ");
+	&radiusd::radlog(L_DBG,"From xlat $a $b $c $d ");
+	# Slurp the file and wrap its contents in a handler() subroutine
+	open my $fh, '<', $filename or die "open '$filename': $!";
+	my $sub = do { local $/; <$fh> };
+	close $fh;
+	my $eval = qq{ sub handler{ $sub;} };
+	eval $eval;
+	eval { main->handler; };
+}
+
+# Function to handle detach
+sub detach {
+	# For debugging purposes only
+#	&log_request_attributes;
+
+	# Do some logging.
+	&radiusd::radlog(L_DBG,"rlm_perl::Detaching. Reloading. Done.");
+}
+
+#
+# Some functions that can be called from other functions
+#
+
+sub test_call {
+	# Some code goes here
+}
+
+sub log_request_attributes {
+	# This shouldn't be done in production environments!
+	# This is only meant for debugging!
+	for (keys %RAD_REQUEST) {
+		&radiusd::radlog(L_DBG, "RAD_REQUEST: $_ = $RAD_REQUEST{$_}");
+	}
+}
+
diff --git a/src/test/setup/radius-config/freeradius/mods-config/preprocess/hints b/src/test/setup/radius-config/freeradius/mods-config/preprocess/hints
new file mode 100644
index 0000000..87306ad
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/preprocess/hints
@@ -0,0 +1,77 @@
+# hints
+#
+#	The hints file.   This file is used to match
+#	a request, and then add attributes to it.  This
+#	process allows a user to login as "bob.ppp" (for example),
+#	and receive a PPP connection, even if the NAS doesn't
+#	ask for PPP.  The "hints" file is used to match the
+#	".ppp" portion of the username, and to add a set of
+#	"user requested PPP" attributes to the request.
+#
+#	Matching can take place with the Prefix and Suffix
+#	attributes, just like in the "users" file.
+#	These attributes operate ONLY on the username, though.
+#
+#	Note that the attributes that are set for each
+#	entry are _NOT_ passed back to the terminal server.
+#	Instead they are added to the information that has
+#	been _SENT_ by the terminal server.
+#
+#	This extra information can be used in the users file to
+#	match on. Usually this is done in the DEFAULT entries,
+#	of which there can be more than one.
+#
+#	In addition a matching entry can transform a username
+#	for authentication purposes if the "Strip-User-Name"
+#	variable is set to Yes in an entry (default is Yes).
+#
+#	A special non-protocol name-value pair called "Hint"
+#	can be set to match on in the "users" file.
+#
+#	The following is how most ISPs want to set this up.
+#
+# Version:	$Id: f92ffb9f1e5bd0509b2e0e5e015001fda52bdfc3 $
+#
+
+
+DEFAULT	Suffix == ".ppp", Strip-User-Name = Yes
+	Hint = "PPP",
+	Service-Type = Framed-User,
+	Framed-Protocol = PPP
+
+DEFAULT	Suffix == ".slip", Strip-User-Name = Yes
+	Hint = "SLIP",
+	Service-Type = Framed-User,
+	Framed-Protocol = SLIP
+
+DEFAULT	Suffix == ".cslip", Strip-User-Name = Yes
+	Hint = "CSLIP",
+	Service-Type = Framed-User,
+	Framed-Protocol = SLIP,
+	Framed-Compression = Van-Jacobson-TCP-IP
+
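+#	Illustrative walk-through (example values only):  a request with
+#	User-Name = "bob.cslip" matches the third entry above.  Because
+#	Strip-User-Name = Yes, authentication then proceeds for "bob",
+#	while Hint = "CSLIP", Service-Type = Framed-User,
+#	Framed-Protocol = SLIP and Framed-Compression = Van-Jacobson-TCP-IP
+#	are added to the request, where the "users" file can match on them
+#	(e.g. with 'Hint == "CSLIP"').
+#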
+######################################################################
+#
+#	These entries are old, and commented out by default.
+#	They confuse too many people when "Peter" logs in, and the
+#	server thinks that the user "eter" is asking for PPP.
+#
+#DEFAULT	Prefix == "U", Strip-User-Name = No
+#	Hint = "UUCP"
+
+#DEFAULT	Prefix == "P", Strip-User-Name = Yes
+#	Hint = "PPP",
+#	Service-Type = Framed-User,
+#	Framed-Protocol = PPP
+
+#DEFAULT	Prefix == "S", Strip-User-Name = Yes
+#	Hint = "SLIP",
+#	Service-Type = Framed-User,
+#	Framed-Protocol = SLIP
+
+#DEFAULT	Prefix == "C", Strip-User-Name = Yes
+#	Hint = "CSLIP",
+#	Service-Type = Framed-User,
+#	Framed-Protocol = SLIP,
+#	Framed-Compression = Van-Jacobson-TCP-IP
+
diff --git a/src/test/setup/radius-config/freeradius/mods-config/preprocess/huntgroups b/src/test/setup/radius-config/freeradius/mods-config/preprocess/huntgroups
new file mode 100644
index 0000000..a937c8b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/preprocess/huntgroups
@@ -0,0 +1,46 @@
+#
+# huntgroups	This file defines the `huntgroups' that you have. A
+#		huntgroup is defined by specifying the IP address of
+#		the NAS and possibly a port range. Port can be identified
+#		as just one port, or a range (from-to), and multiple ports
+#		or ranges of ports must be separated by a comma. For
+#		example: 1,2,3-8
+#
+#		Matching is done while RADIUS scans the user file; if it
+#		includes the selection criterion "Huntgroup-Name == XXX"
+#		the huntgroup is looked up in this file to see if it
+#		matches. There can be multiple definitions of the same
+#		huntgroup; the first one that matches will be used.
+#
+#		This file can also be used to define restricted access
+#		to certain huntgroups. The second and following lines
+#		define the access restrictions (based on username and
+#		UNIX usergroup) for the huntgroup.
+#
+
+#
+# Our POP in Alphen a/d Rijn has 3 terminal servers. Create a Huntgroup-Name
+# called Alphen that matches on all three terminal servers.
+#
+#alphen		NAS-IP-Address == 192.0.2.5
+#alphen		NAS-IP-Address == 192.0.2.6
+#alphen		NAS-IP-Address == 192.0.2.7
+
+#
+# The POP in Delft consists of only one terminal server.
+#
+#delft		NAS-IP-Address == 198.51.100.5
+
+#
+# Ports 0-7 on the first terminal server in Alphen are connected to
+# a huntgroup that is for business users only. Note that only one
+# of the username or groupname has to match to get access (OR/OR).
+#
+# Note that this huntgroup is a subset of the "alphen" huntgroup.
+#
+#business	NAS-IP-Address == 198.51.100.5, NAS-Port-Id == 0-7
+#		User-Name = rogerl,
+#		User-Name = henks,
+#		Group = business,
+#		Group = staff
+
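+#
+# Illustrative "users" file entry (example only, not part of this file),
+# showing how a huntgroup defined above can be used as a selection
+# criterion:
+#
+#DEFAULT	Huntgroup-Name == "alphen"
+#	Reply-Message = "Welcome to the Alphen POP"
+#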
diff --git a/src/test/setup/radius-config/freeradius/mods-config/python/example.py b/src/test/setup/radius-config/freeradius/mods-config/python/example.py
new file mode 100755
index 0000000..a000483
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/python/example.py
@@ -0,0 +1,58 @@
+#! /usr/bin/env python
+#
+# Definitions for RADIUS programs
+#
+# Copyright 2002 Miguel A.L. Paraz <mparaz@mparaz.com>
+#
+# This should only be used when testing modules.
+# Inside freeradius, the 'radiusd' Python module is created by the C module
+# and the definitions are automatically created.
+#
+# $Id: 02e9f237cc0df3d7be08413238e504b90bf59b1a $
+
+# from modules.h
+
+RLM_MODULE_REJECT = 0
+RLM_MODULE_FAIL = 1
+RLM_MODULE_OK = 2
+RLM_MODULE_HANDLED = 3
+RLM_MODULE_INVALID = 4
+RLM_MODULE_USERLOCK = 5
+RLM_MODULE_NOTFOUND = 6
+RLM_MODULE_NOOP = 7
+RLM_MODULE_UPDATED = 8
+RLM_MODULE_NUMCODES = 9
+
+
+# from radiusd.h
+L_DBG = 1
+L_AUTH = 2
+L_INFO = 3
+L_ERR = 4
+L_PROXY	= 5
+L_CONS = 128
+
+OP={       '{':2,   '}':3,   '(':4,   ')':5,   ',':6,   ';':7,  '+=':8,  '-=':9,  ':=':10,
+  '=':11, '!=':12, '>=':13,  '>':14, '<=':15,  '<':16, '=~':17, '!~':18, '=*':19, '!*':20,
+ '==':21 , '#':22 }
+
+OP_TRY = (':=', '+=', '-=', '=' )
+
+def resolve(*lines):
+    tuples = []
+    for line in lines:
+        for op in OP_TRY:
+            arr = line.rsplit(op)
+            if len(arr) == 2:
+                tuples.append((str(arr[0].strip()), OP[op], str(arr[1].strip())))
+                break
+    return tuple(tuples)
+
+# log function
+def radlog(level, msg):
+    import sys
+    sys.stdout.write(msg + '\n')
+
+    level = level
+
+
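+# Simple self-test sketch -- only runs when this file is executed directly
+# while testing modules; it is never used like this by the server itself.
+if __name__ == '__main__':
+    # resolve() turns "Attribute OP Value" strings into
+    # (attribute, op-code, value) tuples using the OP table above.
+    pairs = resolve('User-Name := bob', 'Framed-Protocol = PPP')
+    radlog(L_INFO, 'resolved: %s' % str(pairs))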
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/dailycounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/dailycounter.conf
new file mode 100644
index 0000000..97c4661
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/dailycounter.conf
@@ -0,0 +1,33 @@
+#
+#  This query properly handles calls that span from the
+#  previous reset period into the current period but
+#  involves more work for the SQL server than those
+#  below
+#
+query = "\
+	SELECT SUM(acctsessiontime - GREATEST((%b - UNIX_TIMESTAMP(acctstarttime)), 0)) \
+	FROM radacct \
+	WHERE username = '%{${key}}' \
+	AND UNIX_TIMESTAMP(acctstarttime) + acctsessiontime > '%b'"
+
+#
+#  This query ignores calls that started in a previous
+#  reset period and continue into this one. But it
+#  is a little easier on the SQL server
+#
+#query = "\
+#	SELECT SUM(acctsessiontime) \
+#	FROM radacct \
+#	WHERE username = '%{${key}}' \
+#	AND acctstarttime > FROM_UNIXTIME('%b')"
+
+#
+#  This query is the same as above, but demonstrates an
+#  additional counter parameter '%e' which is the
+#  timestamp for the end of the period
+#
+#query = "\
+#	SELECT SUM(acctsessiontime) \
+#	FROM radacct \
+#	WHERE username = '%{${key}}' \
+#	AND acctstarttime BETWEEN FROM_UNIXTIME('%b') AND FROM_UNIXTIME('%e')"
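+
+#
+#  Rough sketch of how a query file like this is normally pulled in by an
+#  sqlcounter module instance.  The attribute and path names below are the
+#  typical defaults, not taken from this repository -- check
+#  mods-available/sqlcounter before relying on them:
+#
+#	sqlcounter dailycounter {
+#		sql_module_instance = sql
+#		dialect = ${modules.sql.dialect}
+#		counter_name = Daily-Session-Time
+#		check_name = Max-Daily-Session
+#		reply_name = Session-Timeout
+#		key = User-Name
+#		reset = daily
+#		$INCLUDE ${modconfdir}/sql/counter/${dialect}/dailycounter.conf
+#	}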
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/expire_on_login.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/expire_on_login.conf
new file mode 100644
index 0000000..97e1bc5
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/expire_on_login.conf
@@ -0,0 +1,6 @@
+query = "\
+	SELECT TIMESTAMPDIFF(SECOND, acctstarttime, NOW()) \
+	FROM radacct \
+	WHERE UserName='%{${key}}' \
+	ORDER BY acctstarttime \
+	LIMIT 1;"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/monthlycounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/monthlycounter.conf
new file mode 100644
index 0000000..6d93d15
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/monthlycounter.conf
@@ -0,0 +1,34 @@
+#
+#  This query properly handles calls that span from the
+#  previous reset period into the current period but
+#  involves more work for the SQL server than those
+#  below
+#
+query = "\
+	SELECT SUM(acctsessiontime - GREATEST((%b - UNIX_TIMESTAMP(acctstarttime)), 0)) \
+	FROM radacct \
+	WHERE username='%{${key}}' \
+	AND UNIX_TIMESTAMP(acctstarttime) + acctsessiontime > '%b'"
+
+#
+#  This query ignores calls that started in a previous
+#  reset period and continue into this one. But it
+#  is a little easier on the SQL server
+#
+#query = "\
+#	SELECT SUM(acctsessiontime) \
+#	FROM radacct\
+#	WHERE username='%{${key}}' \
+#	AND acctstarttime > FROM_UNIXTIME('%b')"
+
+#
+#  This query is the same as above, but demonstrates an
+#  additional counter parameter '%e' which is the
+#  timestamp for the end of the period
+#
+#query = "\
+#	SELECT SUM(acctsessiontime) \
+#	FROM radacct \
+#	WHERE username='%{${key}}' \
+#	AND acctstarttime BETWEEN FROM_UNIXTIME('%b') \
+#	AND FROM_UNIXTIME('%e')"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/noresetcounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/noresetcounter.conf
new file mode 100644
index 0000000..abcb21b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/mysql/noresetcounter.conf
@@ -0,0 +1,4 @@
+query = "\
+	SELECT IFNULL(SUM(AcctSessionTime),0) \
+	FROM radacct \
+	WHERE UserName='%{${key}}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/dailycounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/dailycounter.conf
new file mode 100644
index 0000000..64802bf
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/dailycounter.conf
@@ -0,0 +1,34 @@
+#
+#  This query properly handles calls that span from the
+#  previous reset period into the current period but
+#  involves more work for the SQL server than those
+#  below
+#
+query = "\
+	SELECT SUM(AcctSessionTime - GREATEST((%b - AcctStartTime::ABSTIME::INT4), 0)) \
+	FROM radacct \
+	WHERE UserName='%{${key}}' \
+	AND AcctStartTime::ABSTIME::INT4 + AcctSessionTime > '%b'"
+
+#
+#  This query ignores calls that started in a previous
+#  reset period and continue into this one. But it
+#  is a little easier on the SQL server
+#
+#query = "\
+#	SELECT SUM(AcctSessionTime) \
+#	FROM radacct \
+#	WHERE UserName='%{${key}}' \
+#	AND AcctStartTime::ABSTIME::INT4 > '%b'"
+
+#
+#  This query is the same as above, but demonstrates an
+#  additional counter parameter '%e' which is the
+#  timestamp for the end of the period
+#
+#query = "\
+#	SELECT SUM(AcctSessionTime) \
+#	FROM radacct \
+#	WHERE UserName='%{${key}}' \
+#	AND AcctStartTime::ABSTIME::INT4 BETWEEN '%b' \
+#	AND '%e'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/expire_on_login.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/expire_on_login.conf
new file mode 100644
index 0000000..c4ce096
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/expire_on_login.conf
@@ -0,0 +1,6 @@
+query = "\
+	SELECT EXTRACT(EPOCH FROM (NOW() - acctstarttime)) \
+	FROM radacct \
+	WHERE UserName='%{${key}}' \
+	ORDER BY acctstarttime \
+	LIMIT 1;"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/monthlycounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/monthlycounter.conf
new file mode 100644
index 0000000..eb831a4
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/monthlycounter.conf
@@ -0,0 +1,31 @@
+#  This query properly handles calls that span from the
+#  previous reset period into the current period but
+#  involves more work for the SQL server than those
+#  below
+query = "\
+	SELECT SUM(AcctSessionTime - GREATEST((%b - AcctStartTime::ABSTIME::INT4), 0)) \
+	FROM radacct \
+	WHERE UserName='%{${key}}' \
+	AND AcctStartTime::ABSTIME::INT4 + AcctSessionTime > '%b'"
+
+#
+#  This query ignores calls that started in a previous
+#  reset period and continue into this one. But it
+#  is a little easier on the SQL server
+#
+#query = "\
+#	SELECT SUM(AcctSessionTime) \
+#	FROM radacct \
+#	WHERE UserName='%{${key}}' \
+#	AND AcctStartTime::ABSTIME::INT4 > '%b'"
+
+#
+#  This query is the same as above, but demonstrates an
+#  additional counter parameter '%e' which is the
+#  timestamp for the end of the period
+#
+#query = "\
+#	SELECT SUM(AcctSessionTime) \
+#	FROM radacct \
+#	WHERE UserName='%{${key}}' \
+#	AND AcctStartTime::ABSTIME::INT4 BETWEEN '%b' AND '%e'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/noresetcounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/noresetcounter.conf
new file mode 100644
index 0000000..ac5182e
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/postgresql/noresetcounter.conf
@@ -0,0 +1,4 @@
+query = "\
+	SELECT SUM(AcctSessionTime) \
+	FROM radacct \
+	WHERE UserName='%{${key}}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/dailycounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/dailycounter.conf
new file mode 100644
index 0000000..6befdcc
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/dailycounter.conf
@@ -0,0 +1,33 @@
+#
+#  This query properly handles calls that span from the
+#  previous reset period into the current period but
+#  involves more work for the SQL server than those
+#  below
+#
+query = "\
+	SELECT SUM(acctsessiontime - GREATEST((%b - strftime('%%s', acctstarttime)), 0)) \
+	FROM radacct \
+	WHERE username = '%{${key}}' \
+	AND (strftime('%%s', acctstarttime) + acctsessiontime) > %b"
+
+#
+#  This query ignores calls that started in a previous
+#  reset period and continue into this one. But it
+#  is a little easier on the SQL server
+#
+#query = "\
+#	SELECT SUM(acctsessiontime) \
+#	FROM radacct \
+#	WHERE username = '%{${key}}' \
+#	AND acctstarttime > %b"
+
+#
+#  This query is the same as above, but demonstrates an
+#  additional counter parameter '%e' which is the
+#  timestamp for the end of the period
+#
+#query = "\
+#	SELECT SUM(acctsessiontime) FROM radacct \
+#	WHERE username = '%{${key}}' \
+#	AND acctstarttime BETWEEN %b \
+#	AND %e"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/expire_on_login.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/expire_on_login.conf
new file mode 100644
index 0000000..f4e95a5
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/expire_on_login.conf
@@ -0,0 +1,6 @@
+query = "\
+	SELECT MAX(strftime('%%s', 'now') - strftime('%%s', acctstarttime), 0) AS expires \
+	FROM radacct \
+	WHERE username = '%{${key}}' \
+	ORDER BY acctstarttime \
+	LIMIT 1;"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/monthlycounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/monthlycounter.conf
new file mode 100644
index 0000000..5bb8140
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/monthlycounter.conf
@@ -0,0 +1,34 @@
+#
+#  This query properly handles calls that span from the
+#  previous reset period into the current period but
+#  involves more work for the SQL server than those
+#  below
+#
+query = "\
+	SELECT SUM(acctsessiontime - GREATEST((%b - strftime('%%s', acctstarttime)), 0)) \
+	FROM radacct \
+	WHERE username = '%{${key}}' AND \
+	(strftime('%%s', acctstarttime) + acctsessiontime) > %b"
+
+#
+#  This query ignores calls that started in a previous
+#  reset period and continue into into this one. But it
+#  is a little easier on the SQL server
+#
+#query = "\
+#	SELECT SUM(acctsessiontime) \
+#	FROM radacct \
+#	WHERE username = '%{${key}}' \
+#	AND acctstarttime > %b"
+
+#
+#  This query is the same as above, but demonstrates an
+#  additional counter parameter '%e' which is the
+#  timestamp for the end of the period
+#
+#query = "\
+#	SELECT SUM(acctsessiontime) \
+#	FROM radacct \
+#	WHERE username = '%{${key}}' \
+#	AND acctstarttime BETWEEN %b \
+#	AND %e"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/noresetcounter.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/noresetcounter.conf
new file mode 100644
index 0000000..ac2d869
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/counter/sqlite/noresetcounter.conf
@@ -0,0 +1,4 @@
+query = "\
+	SELECT IFNULL(SUM(acctsessiontime),0) \
+	FROM radacct \
+	WHERE username = '%{${key}}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/mysql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/mysql/queries.conf
new file mode 100644
index 0000000..415c416
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/mysql/queries.conf
@@ -0,0 +1,50 @@
+# -*- text -*-
+#
+#  cui/mysql/queries.conf -- Queries to update a MySQL CUI table.
+#
+#  $Id: f8f18cab562e7321756cd1f3411bbc9897ef3377 $
+
+post-auth {
+	query = "\
+		INSERT IGNORE INTO ${..cui_table} \
+			(clientipaddress, callingstationid, username, cui, lastaccounting) \
+		VALUES \
+			('%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}', '%{Calling-Station-Id}', \
+			'%{User-Name}', '%{reply:Chargeable-User-Identity}', NULL) \
+		ON DUPLICATE KEY UPDATE \
+			lastaccounting='0000-00-00 00:00:00', \
+			cui='%{reply:Chargeable-User-Identity}'"
+
+}
+
+accounting {
+	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
+	type {
+		start {
+			query = "\
+				UPDATE ${....cui_table} SET \
+					lastaccounting = CURRENT_TIMESTAMP \
+				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
+				AND callingstationid = '%{Calling-Station-Id}' \
+				AND username = '%{User-Name}' \
+				AND cui = '%{Chargeable-User-Identity}'"
+		}
+		interim-update {
+			query ="\
+				UPDATE ${....cui_table} SET \
+					lastaccounting = CURRENT_TIMESTAMP \
+				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
+				AND callingstationid = '%{Calling-Station-Id}' \
+				AND username = '%{User-Name}' \
+				AND cui = '%{Chargeable-User-Identity}'"
+		}
+		stop {
+			query ="\
+				DELETE FROM ${....cui_table} \
+				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
+				AND callingstationid = '%{Calling-Station-Id}' \
+				AND username = '%{User-Name}' \
+				AND cui = '%{Chargeable-User-Identity}'"
+		}
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/mysql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/mysql/schema.sql
new file mode 100644
index 0000000..da9b2f7
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/mysql/schema.sql
@@ -0,0 +1,9 @@
+CREATE TABLE `cui` (
+  `clientipaddress` varchar(46) NOT NULL default '',
+  `callingstationid` varchar(50) NOT NULL default '',
+  `username` varchar(64) NOT NULL default '',
+  `cui` varchar(32) NOT NULL default '',
+  `creationdate` timestamp NOT NULL default CURRENT_TIMESTAMP,
+  `lastaccounting` timestamp NOT NULL default '0000-00-00 00:00:00',
+  PRIMARY KEY  (`username`,`clientipaddress`,`callingstationid`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/postgresql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/postgresql/queries.conf
new file mode 100644
index 0000000..0e985b3
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/postgresql/queries.conf
@@ -0,0 +1,47 @@
+# -*- text -*-
+#
+#  cui/postgresql/queries.conf -- Queries to update a PostgreSQL CUI table.
+#
+#  $Id: 6c2215f0abbe5cb30658ea541d525fd7a274c547 $
+
+post-auth {
+	query = "\
+		INSERT INTO ${..cui_table} \
+			(clientipaddress, callingstationid, username, cui) \
+		VALUES \
+			('%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}', '%{Calling-Station-Id}', \
+			'%{User-Name}', '%{reply:Chargeable-User-Identity}')"
+
+}
+
+accounting {
+	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
+	type {
+		start {
+			query = "\
+				UPDATE ${....cui_table} SET \
+					lastaccounting = now() \
+				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
+				AND callingstationid = '%{Calling-Station-Id}' \
+				AND username = '%{User-Name}' \
+				AND cui = '%{Chargeable-User-Identity}'"
+		}
+		interim-update {
+			query ="\
+				UPDATE ${....cui_table} SET \
+					lastaccounting = now() \
+				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
+				AND callingstationid = '%{Calling-Station-Id}' \
+				AND username = '%{User-Name}' \
+				AND cui = '%{Chargeable-User-Identity}'"
+		}
+		stop {
+			query ="\
+				DELETE FROM ${....cui_table} \
+				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
+				AND callingstationid = '%{Calling-Station-Id}' \
+				AND username = '%{User-Name}' \
+				AND cui = '%{Chargeable-User-Identity}'"
+		}
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/postgresql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/postgresql/schema.sql
new file mode 100644
index 0000000..3b24401
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/postgresql/schema.sql
@@ -0,0 +1,14 @@
+CREATE TABLE cui (
+  clientipaddress INET NOT NULL DEFAULT '0.0.0.0',
+  callingstationid varchar(50) NOT NULL DEFAULT '',
+  username varchar(64) NOT NULL DEFAULT '',
+  cui varchar(32) NOT NULL DEFAULT '',
+  creationdate TIMESTAMP with time zone NOT NULL default 'now()',
+  lastaccounting TIMESTAMP with time zone NOT NULL default '-infinity'::timestamp,
+  PRIMARY KEY  (username, clientipaddress, callingstationid)
+);
+
+CREATE RULE postauth_query AS ON INSERT TO cui
+	WHERE EXISTS(SELECT 1 FROM cui WHERE (username, clientipaddress, callingstationid)=(NEW.username, NEW.clientipaddress, NEW.callingstationid))
+	DO INSTEAD UPDATE cui SET lastaccounting ='-infinity'::timestamp with time zone, cui=NEW.cui WHERE (username, clientipaddress, callingstationid)=(NEW.username, NEW.clientipaddress, NEW.callingstationid);
+
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/sqlite/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/sqlite/queries.conf
new file mode 100644
index 0000000..defc591
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/sqlite/queries.conf
@@ -0,0 +1,47 @@
+# -*- text -*-
+#
+#  cui/sqlite/queries.conf -- Queries to update a sqlite CUI table.
+#
+#  $Id: 41741eb70ae9c428ba5230aaf9d9b84f95c050a9 $
+
+post-auth {
+	query = "\
+		INSERT OR REPLACE INTO ${..cui_table} \
+			(clientipaddress, callingstationid, username, cui, lastaccounting) \
+		VALUES \
+			('%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}', '%{Calling-Station-Id}', \
+			'%{User-Name}', '%{reply:Chargeable-User-Identity}', NULL)"
+
+}
+
+accounting {
+	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
+	type {
+		start {
+			query = "\
+				UPDATE ${....cui_table} SET \
+					lastaccounting = CURRENT_TIMESTAMP \
+				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
+				AND callingstationid = '%{Calling-Station-Id}' \
+				AND username = '%{User-Name}' \
+				AND cui = '%{Chargeable-User-Identity}'"
+		}
+		interim-update {
+			query ="\
+				UPDATE ${....cui_table} SET \
+					lastaccounting = CURRENT_TIMESTAMP \
+				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
+				AND callingstationid = '%{Calling-Station-Id}' \
+				AND username = '%{User-Name}' \
+				AND cui = '%{Chargeable-User-Identity}'"
+		}
+		stop {
+			query ="\
+				DELETE FROM ${....cui_table} \
+				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
+				AND callingstationid = '%{Calling-Station-Id}' \
+				AND username = '%{User-Name}' \
+				AND cui = '%{Chargeable-User-Identity}'"
+		}
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/cui/sqlite/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/sqlite/schema.sql
new file mode 100644
index 0000000..8473534
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/cui/sqlite/schema.sql
@@ -0,0 +1,9 @@
+CREATE TABLE `cui` (
+  `clientipaddress` varchar(46) NOT NULL default '',
+  `callingstationid` varchar(50) NOT NULL default '',
+  `username` varchar(64) NOT NULL default '',
+  `cui` varchar(32) NOT NULL default '',
+  `creationdate` timestamp NOT NULL default CURRENT_TIMESTAMP,
+  `lastaccounting` timestamp NOT NULL default '0000-00-00 00:00:00',
+  PRIMARY KEY  (`username`,`clientipaddress`,`callingstationid`)
+);
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/mysql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/mysql/queries.conf
new file mode 100644
index 0000000..eb1a79d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/mysql/queries.conf
@@ -0,0 +1,161 @@
+# -*- text -*-
+#
+#  ippool-dhcp/mysql/queries.conf -- MySQL queries for rlm_sqlippool
+#
+#  $Id: ac9476e5091010d39c9212e424efb360a99a1e71 $
+
+#
+# This series of queries allocates an IP address
+#
+#allocate_clear = "\
+#	UPDATE ${ippool_table} \
+#	SET \
+#		nasipaddress = '', \
+#		pool_key = 0, \
+#		callingstationid = '', \
+#		username = '', \
+#		expiry_time = NULL \
+#	WHERE pool_key = '${pool_key}'"
+
+#
+#  This series of queries allocates an IP address
+#  (Note: If your pool_key is set to Calling-Station-Id and not NAS-Port
+#  then you may wish to delete the "AND nasipaddress = '%{Nas-IP-Address}'
+#  from the WHERE clause)
+#
+allocate_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE expiry_time <= NOW() - INTERVAL 1 SECOND \
+	AND nasipaddress = '%{Nas-IP-Address}'"
+
+#
+#  The ORDER BY clause of this query tries to allocate the same IP-address
+#  which user had last session...
+#
+allocate_find = "\
+	SELECT framedipaddress \
+	FROM ${ippool_table} \
+	WHERE pool_name = '%{control:Pool-Name}' \
+	AND (expiry_time < NOW() OR expiry_time IS NULL) \
+	ORDER BY \
+		(username <> '%{User-Name}'), \
+		(callingstationid <> '%{Calling-Station-Id}'), \
+		expiry_time \
+	LIMIT 1 \
+	FOR UPDATE"
+
+#
+#  If you prefer to allocate a random IP address every time, use this query instead
+#
+#allocate_find = "\
+#	SELECT framedipaddress \
+#	FROM ${ippool_table} \
+#	WHERE pool_name = '%{control:Pool-Name}' \
+#	AND expiry_time IS NULL \
+#	ORDER BY RAND() \
+#	LIMIT 1 \
+#	FOR UPDATE"
+
+#
+#  If an IP could not be allocated, check to see if the pool exists or not
+#  This allows the module to differentiate between a full pool and no pool
+#  Note: If you are not running redundant pool modules this query may be
+#  commented out to save running this query every time an ip is not allocated.
+#
+pool_check = "\
+	SELECT id \
+	FROM ${ippool_table} \
+	WHERE pool_name='%{control:Pool-Name}' \
+	LIMIT 1"
+
+#
+#  This is the final IP Allocation query, which saves the allocated ip details
+#
+allocate_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '%{NAS-IP-Address}', \
+		pool_key = '${pool_key}', \
+		callingstationid = '%{Calling-Station-Id}', \
+		username = '%{User-Name}', \
+		expiry_time = NOW() + INTERVAL ${lease_duration} SECOND \
+	WHERE framedipaddress = '%I' AND expiry_time IS NULL"
+
+#
+#  This series of queries frees an IP number when an accounting
+#  START record arrives
+#
+start_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = NOW() + INTERVAL ${lease_duration} SECOND \
+	WHERE nasipaddress = '%{NAS-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This series of queries frees an IP number when an accounting
+#  STOP record arrives
+#
+stop_clear = "UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE nasipaddress = '%{Nas-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This series of queries frees an IP number when an accounting
+#  ALIVE record arrives
+#
+alive_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = NOW() + INTERVAL ${lease_duration} SECOND \
+	WHERE nasipaddress = '%{Nas-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This series of queries frees the IP numbers allocated to a
+#  NAS when an accounting ON record arrives
+#
+on_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE nasipaddress = '%{Nas-IP-Address}'"
+
+#
+#  This series of queries frees the IP numbers allocated to a
+#  NAS when an accounting OFF record arrives
+#
+off_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE nasipaddress = '%{Nas-IP-Address}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/oracle/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/oracle/queries.conf
new file mode 100644
index 0000000..673547b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/oracle/queries.conf
@@ -0,0 +1,175 @@
+# -*- text -*-
+#
+#  ippool-dhcp/oracle/queries.conf -- Oracle queries for dhcp-ippool
+#
+#  $id: 416d59802a1321c16b936bb5e63c288ca3634bcd $
+
+#
+#  "START TRANSACTION" not required with Oracle
+#
+allocate_begin = ""
+start_begin = ""
+alive_begin = ""
+stop_begin = ""
+on_begin = ""
+off_begin = ""
+
+#
+#  This query allocates an IP address from the Pool
+#  It tries to allocate to the user
+#  either the same IP-address that they had last session
+#  or the IP which has been unused for the longest period of time
+#
+allocate_find = "\
+	WITH POOLS AS (\
+		SELECT * \
+		FROM ${ippool_table} \
+		WHERE pool_name = '%{control:Pool-Name}' \
+		AND (\
+			pool_key = '${pool_key}' \
+			OR expiry_time = (\
+				SELECT MIN(expiry_time) \
+				FROM ${ippool_table} \
+				WHERE pool_name = '%{control:Pool-Name}' \
+				AND expiry_time < CURRENT_TIMESTAMP AND pool_key != '${pool_key}'\
+			)\
+		)\
+	) \
+	SELECT framedipaddress \
+	FROM (\
+		SELECT framedipaddress \
+		FROM POOLS \
+		WHERE pool_key = '${pool_key}' \
+		OR (\
+			NOT EXISTS (\
+				SELECT 1 \
+				FROM POOLS \
+				WHERE pool_key = '${pool_key}'\
+			)\
+		)\
+	) WHERE ROWNUM = 1 FOR UPDATE"
+
+#
+#  This function is available if you want to use multiple pools
+#
+#allocate_find = "\
+#	SELECT msqlippool('%{SQL-User-Name}','%{control:Pool-Name}') \
+#	FROM dual"
+
+#
+#  If you prefer to allocate a random IP address every time, use this query instead
+#
+#allocate_find = "\
+#	SELECT framedipaddress \
+#	FROM ${ippool_table}\
+#	WHERE framedipaddress = (\
+#		SELECT framedipaddress \
+#		FROM (\
+#			SELECT framedipaddress \
+#			FROM ${ippool_table} \
+#			WHERE pool_name = '%{control:Pool-Name}' \
+#			AND expiry_time < CURRENT_TIMESTAMP \
+#			ORDER BY DBMS_RANDOM.VALUE\
+#		) \
+#		WHERE ROWNUM = 1\
+#	) \
+#	FOR UPDATE"
+
+#
+#  If an IP could not be allocated, check to see whether the pool exists or not
+#  This allows the module to differentiate between a full pool and no pool
+#  Note: If you are not running redundant pool modules this query may be commented
+#  out to save running this query every time an ip is not allocated.
+#
+#pool_check = "\
+#	SELECT id \
+#	FROM (\
+#		SELECT id \
+#		FROM ${ippool_table} \
+#		WHERE pool_name = '%{control:Pool-Name}'\
+#	) WHERE ROWNUM = 1"
+
+#
+#  This query marks the IP address handed out by "allocate_find" as used
+#  for the period of "lease_duration" after which time it may be reused.
+#
+allocate_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '%{NAS-IP-Address}', \
+		pool_key = '${pool_key}', \
+		callingstationid = '%{Calling-Station-id}', \
+		username = '%{SQL-User-Name}', \
+		expiry_time = CURRENT_TIMESTAMP + INTERVAL '${lease_duration}' SECOND(1) \
+	WHERE framedipaddress = '%I'"
+
+#
+#  This query frees the IP address assigned to "pool_key" when a new request
+#  comes in for the same "pool_key". This means that either you are losing
+#  accounting Stop records or you use Calling-Station-id instead of NAS-Port
+#  as your "pool_key" and your users are able to reconnect before your NAS
+#  has timed out their previous session. (Generally on wireless networks)
+#  (Note: If your pool_key is set to Calling-Station-id and not NAS-Port
+#  then you may wish to delete the "AND nasipaddress = '%{NAS-IP-Address}'
+#  from the WHERE clause)
+#
+allocate_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = CURRENT_TIMESTAMP - INTERVAL '1' SECOND(1) \
+	WHERE pool_key = '${pool_key}'"
+
+#
+#  This query extends an IP address lease by "lease_duration" when an accounting
+#  START record arrives
+#
+start_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = CURRENT_TIMESTAMP + INTERVAL '${lease_duration}' SECOND(1) \
+	WHERE nasipaddress = '%{NAS-IP-Address}' \
+	AND pool_name = '%{control:Pool-Name}' \
+	AND pool_key = '${pool_key}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This query frees an IP address when an accounting
+#  STOP record arrives
+#
+stop_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = CURRENT_TIMESTAMP - INTERVAL '1' SECOND(1) \
+	WHERE pool_key = '${pool_key}'"
+
+#
+#  This query extends an IP address lease by "lease_duration" when an accounting
+#  ALIVE record arrives
+#
+alive_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = CURRENT_TIMESTAMP + INTERVAL '${lease_duration}' SECOND(1) \
+	WHERE pool_key = '${pool_key}' \
+	AND pool_name = '%{control:Pool-Name}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This query frees all IP addresses allocated to a NAS when an
+#  accounting ON record arrives from that NAS
+#
+on_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = CURRENT_TIMESTAMP - INTERVAL '1' SECOND(1) \
+	WHERE nasipaddress = '%{NAS-IP-Address}'"
+
+#
+#  This query frees all IP addresses allocated to a NAS when an
+#  accounting OFF record arrives from that NAS
+#
+off_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = CURRENT_TIMESTAMP - INTERVAL '1' SECOND(1) \
+	WHERE nasipaddress = '%{NAS-IP-Address}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/oracle/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/oracle/schema.sql
new file mode 100644
index 0000000..95ceb8e
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/oracle/schema.sql
@@ -0,0 +1,28 @@
+CREATE TABLE radippool (
+	id                      INT PRIMARY KEY,
+	pool_name               VARCHAR(30) NOT NULL,
+	framedipaddress         VARCHAR(30) NOT NULL,
+	nasipaddress            VARCHAR(30) NOT NULL,
+	pool_key                VARCHAR(64) NOT NULL,
+	calledstationid         VARCHAR(64),
+	callingstationid        VARCHAR(64) NOT NULL,
+	expiry_time             TIMESTAMP(0) NOT NULL,
+	username                VARCHAR(100)
+);
+
+CREATE INDEX radippool_poolname_ipaddr ON radippool (pool_name, framedipaddress);
+CREATE INDEX radippool_poolname_expire ON radippool (pool_name, expiry_time);
+CREATE INDEX radippool_nasipaddr_key ON radippool (nasipaddress, pool_key);
+CREATE INDEX radippool_nasipaddr_calling ON radippool (nasipaddress, callingstationid);
+
+CREATE SEQUENCE radippool_seq START WITH 1 INCREMENT BY 1;
+
+CREATE OR REPLACE TRIGGER radippool_serialnumber
+	BEFORE INSERT OR UPDATE OF id ON radippool
+	FOR EACH ROW
+	BEGIN
+		IF ( :NEW.id = 0 OR :NEW.id IS NULL ) THEN
+			SELECT radippool_seq.NEXTVAL INTO :NEW.id FROM dual;
+		END IF;
+	END;
+/
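+
+-- Illustrative seeding of the pool (example values only; the trigger above
+-- replaces the zero id with the next sequence value):
+--
+-- INSERT INTO radippool (id, pool_name, framedipaddress, nasipaddress, pool_key, callingstationid, expiry_time)
+--   VALUES (0, 'local', '192.168.5.10', '0.0.0.0', '0', 'unknown', CURRENT_TIMESTAMP);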
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/sqlite/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/sqlite/queries.conf
new file mode 100644
index 0000000..124f349
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/sqlite/queries.conf
@@ -0,0 +1,165 @@
+# -*- text -*-
+#
+#  ippool-dhcp/sqlite/queries.conf -- SQLite queries for rlm_sqlippool
+#
+#  $Id: 8709a562ee8877f02a43118464371eae101f3fbc $
+
+#
+#  This series of queries allocates an IP address
+#
+#allocate_clear = "\
+#	UPDATE ${ippool_table} \
+#	SET \
+#		nasipaddress = '', \
+#		pool_key = 0, \
+#		callingstationid = '', \
+#		username = '', \
+#		expiry_time = NULL \
+#	WHERE pool_key = '${pool_key}'"
+
+#
+#  This series of queries allocates an IP address
+#  (Note: If your pool_key is set to Calling-Station-Id and not NAS-Port
+#  then you may wish to delete the "AND nasipaddress = '%{Nas-IP-Address}'
+#  from the WHERE clause)
+#
+allocate_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE expiry_time <= datetime(strftime('%%s', 'now') - 1, 'unixepoch') \
+	AND nasipaddress = '%{Nas-IP-Address}'"
+
+#
+#  The ORDER BY clause of this query tries to allocate the same IP-address
+#  which user had last session...
+#
+allocate_find = "\
+	SELECT framedipaddress \
+	FROM ${ippool_table} \
+	WHERE pool_name = '%{control:Pool-Name}' \
+	AND (\
+		((expiry_time < datetime('now')) OR expiry_time IS NULL) \
+		OR (callingstationid = '%{Calling-Station-Id}') \
+		AND expiry_time > datetime('now')\
+	) \
+	ORDER BY \
+		(callingstationid <> '%{Calling-Station-Id}'), \
+		expiry_time \
+	LIMIT 1"
+
+#
+# If you prefer to allocate a random IP address every time, use this query instead
+#
+#allocate_find = "\
+#	SELECT framedipaddress FROM ${ippool_table} \
+#	WHERE pool_name = '%{control:Pool-Name}' \
+#	AND expiry_time IS NULL \
+#	ORDER BY RAND() \
+#	LIMIT 1 \
+#	FOR UPDATE"
+
+#
+#  If an IP could not be allocated, check to see if the pool exists or not
+#  This allows the module to differentiate between a full pool and no pool
+#  Note: If you are not running redundant pool modules this query may be
+#  commented out to save running this query every time an ip is not allocated.
+#
+pool_check = "\
+	SELECT id \
+	FROM ${ippool_table} \
+	WHERE pool_name='%{control:Pool-Name}' \
+	LIMIT 1"
+
+#
+#  This is the final IP Allocation query, which saves the allocated ip details
+#
+allocate_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '%{NAS-IP-Address}', \
+		pool_key = '${pool_key}', \
+		callingstationid = '%{Calling-Station-Id}', \
+		username = '%{User-Name}', \
+		expiry_time = datetime(strftime('%%s', 'now') + ${lease_duration}, 'unixepoch') \
+	WHERE framedipaddress = '%I' \
+	AND expiry_time IS NULL"
+
+#
+#  The following queries are not used for DHCP IP assignment.
+#
+
+#
+#  This series of queries frees an IP number when an accounting START record arrives
+#
+start_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = datetime(strftime('%%s', 'now') + ${lease_duration}, 'unixepoch') \
+	WHERE nasipaddress = '%{NAS-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This series of queries frees an IP number when an accounting STOP record arrives
+#
+stop_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE nasipaddress = '%{Nas-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This series of queries frees an IP number when an accounting ALIVE record arrives
+#
+alive_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = datetime(strftime('%%s', 'now') + ${lease_duration}, 'unixepoch') \
+	WHERE nasipaddress = '%{Nas-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This series of queries frees the IP numbers allocated to a
+#  NAS when an accounting ON record arrives
+#
+on_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE nasipaddress = '%{Nas-IP-Address}'"
+
+#
+#  This series of queries frees the IP numbers allocated to a
+#  NAS when an accounting OFF record arrives
+#
+off_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE nasipaddress = '%{Nas-IP-Address}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/sqlite/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/sqlite/schema.sql
new file mode 100644
index 0000000..9004b36
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool-dhcp/sqlite/schema.sql
@@ -0,0 +1,18 @@
+CREATE TABLE radippool (
+	id                      int PRIMARY KEY,
+	pool_name               varchar(30) NOT NULL,
+	framedipaddress         varchar(30) NOT NULL,
+	nasipaddress            varchar(30) NOT NULL DEFAULT '',
+	pool_key                varchar(64) NOT NULL DEFAULT '',
+	calledstationid         varchar(64),
+	callingstationid        varchar(64) NOT NULL DEFAULT '',
+	expiry_time             timestamp DEFAULT NULL,
+	username                varchar(100)
+);
+ 
+-- Example of how to put IPs in the pool
+-- INSERT INTO radippool (id, pool_name, framedipaddress) VALUES (1, 'local', '192.168.5.10');
+-- INSERT INTO radippool (id, pool_name, framedipaddress) VALUES (2, 'local', '192.168.5.11');
+-- INSERT INTO radippool (id, pool_name, framedipaddress) VALUES (3, 'local', '192.168.5.12');
+-- INSERT INTO radippool (id, pool_name, framedipaddress) VALUES (4, 'local', '192.168.5.13');
+
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/mysql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/mysql/queries.conf
new file mode 100644
index 0000000..e17c513
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/mysql/queries.conf
@@ -0,0 +1,157 @@
+# -*- text -*-
+#
+#  ippool/mysql/queries.conf -- MySQL queries for rlm_sqlippool
+#
+#  $Id: ecdb8beda2fe841c07f513f3a6be9e535f73875b $
+
+#
+#  This series of queries allocates an IP address
+#
+#allocate_clear = "\
+#	UPDATE ${ippool_table} \
+#	SET \
+#		nasipaddress = '', \
+#		pool_key = 0, \
+#		callingstationid = '', \
+#		username = '', \
+#		expiry_time = NULL \
+#	WHERE pool_key = '${pool_key}'"
+
+#
+#  This series of queries allocates an IP address
+#  (Note: If your pool_key is set to Calling-Station-Id and not NAS-Port
+#  then you may wish to delete the "AND nasipaddress = '%{Nas-IP-Address}'
+#  from the WHERE clause)
+#
+allocate_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE expiry_time <= NOW() - INTERVAL 1 SECOND \
+	AND nasipaddress = '%{Nas-IP-Address}'"
+
+#
+#  The ORDER BY clause of this query tries to allocate the same IP-address
+#  which user had last session...
+#
+allocate_find = "\
+	SELECT framedipaddress FROM ${ippool_table} \
+	WHERE pool_name = '%{control:Pool-Name}' \
+	AND (expiry_time < NOW() OR expiry_time IS NULL) \
+	ORDER BY \
+		(username <> '%{User-Name}'), \
+		(callingstationid <> '%{Calling-Station-Id}'), \
+		expiry_time \
+	LIMIT 1 \
+	FOR UPDATE"
+
+#
+#  If you prefer to allocate a random IP address every time, use this query instead.
+#
+#allocate_find = "\
+#	SELECT framedipaddress FROM ${ippool_table} \
+#	WHERE pool_name = '%{control:Pool-Name}' \
+#	AND expiry_time IS NULL \
+#	ORDER BY \
+#		RAND() \
+#	LIMIT 1 \
+#	FOR UPDATE"
+
+#
+#  If an IP could not be allocated, check to see if the pool exists or not
+#  This allows the module to differentiate between a full pool and no pool
+#  Note: If you are not running redundant pool modules this query may be
+#  commented out to save running this query every time an ip is not allocated.
+#
+pool_check = "\
+	SELECT id \
+	FROM ${ippool_table} \
+	WHERE pool_name='%{control:Pool-Name}' \
+	LIMIT 1"
+
+#
+#  This is the final IP Allocation query, which saves the allocated ip details.
+#
+allocate_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '%{NAS-IP-Address}', pool_key = '${pool_key}', \
+		callingstationid = '%{Calling-Station-Id}', \
+		username = '%{User-Name}', expiry_time = NOW() + INTERVAL ${lease_duration} SECOND \
+	WHERE framedipaddress = '%I' \
+	AND expiry_time IS NULL"
+
+#
+#  This series of queries frees an IP number when an accounting START record arrives.
+#
+start_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = NOW() + INTERVAL ${lease_duration} SECOND \
+	WHERE nasipaddress = '%{NAS-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This series of queries frees an IP number when an accounting STOP record arrives.
+#
+stop_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE nasipaddress = '%{Nas-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This series of queries frees an IP number when an accounting ALIVE record arrives.
+#
+alive_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = NOW() + INTERVAL ${lease_duration} SECOND \
+	WHERE nasipaddress = '%{Nas-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This series of queries frees the IP numbers allocated to a
+#  NAS when an accounting ON record arrives
+#
+on_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE nasipaddress = '%{Nas-IP-Address}'"
+
+#
+#  This series of queries frees the IP numbers allocated to a
+#  NAS when an accounting OFF record arrives
+#
+off_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE nasipaddress = '%{Nas-IP-Address}'"
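+
+#
+#  Rough sketch of how these queries are normally pulled in by an sqlippool
+#  module instance.  The names and values below are typical defaults, not
+#  taken from this repository -- check mods-available/sqlippool before
+#  relying on them:
+#
+#	sqlippool {
+#		sql_module_instance = "sql"
+#		ippool_table = "radippool"
+#		lease_duration = 3600
+#		pool_key = "%{NAS-Port}"
+#		$INCLUDE ${modconfdir}/sql/ippool/${dialect}/queries.conf
+#	}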
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/mysql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/mysql/schema.sql
new file mode 100644
index 0000000..ffacc44
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/mysql/schema.sql
@@ -0,0 +1,18 @@
+#
+# Table structure for table 'radippool'
+#
+CREATE TABLE radippool (
+  id                    int(11) unsigned NOT NULL auto_increment,
+  pool_name             varchar(30) NOT NULL,
+  framedipaddress       varchar(15) NOT NULL default '',
+  nasipaddress          varchar(15) NOT NULL default '',
+  calledstationid       VARCHAR(30) NOT NULL,
+  callingstationid      VARCHAR(30) NOT NULL,
+  expiry_time           DATETIME NULL default NULL,
+  username              varchar(64) NOT NULL default '',
+  pool_key              varchar(30) NOT NULL,
+  PRIMARY KEY (id),
+  KEY radippool_poolname_expire (pool_name, expiry_time),
+  KEY framedipaddress (framedipaddress),
+  KEY radippool_nasip_poolkey_ipaddress (nasipaddress, pool_key, framedipaddress)
+) ENGINE=InnoDB;
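+
+#
+# Example of how to put IPs in the pool (illustrative addresses only):
+#
+# INSERT INTO radippool (pool_name, framedipaddress, nasipaddress, calledstationid, callingstationid, pool_key)
+#   VALUES ('local', '192.168.5.10', '', '', '', '');
+# INSERT INTO radippool (pool_name, framedipaddress, nasipaddress, calledstationid, callingstationid, pool_key)
+#   VALUES ('local', '192.168.5.11', '', '', '', '');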
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/procedures.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/procedures.sql
new file mode 100644
index 0000000..598900d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/procedures.sql
@@ -0,0 +1,41 @@
+CREATE OR REPLACE FUNCTION msqlippool(user varchar2, pool varchar2)
+RETURN varchar2 IS
+
+	PRAGMA AUTONOMOUS_TRANSACTION;
+	ip_temp varchar2(20);
+BEGIN
+
+    -- If the user's pool is dynamic, get an ipaddress (oldest one) from the corresponding pool
+
+    if pool = 'Dynamic' then
+	select framedipaddress into ip_temp from (select framedipaddress from radippool where expiry_time < current_timestamp and pool_name = pool ORDER BY expiry_time) where rownum = 1;
+	return (ip_temp);
+
+    -- Else, then get the static ipaddress for that user from the corresponding pool
+
+    else
+	select framedipaddress into ip_temp from radippool where username = user and pool_name = pool;
+	return (ip_temp);
+    end if;
+
+exception
+
+ -- This block is executed if there are no free ipaddresses or no static ip assigned to the user
+
+ when NO_DATA_FOUND then
+	if pool = 'Dynamic' then
+		return(''); -- so sqlippool can log it on radius.log
+	end if;
+
+	-- Else, grabs a free IP from the static pool and saves it in radippool so the user will always get the same IP the next time
+
+	select framedipaddress into ip_temp from (select framedipaddress from radippool where expiry_time < current_timestamp and username is null and pool_name = pool) where rownum = 1;
+	UPDATE radippool SET username = user where framedipaddress = ip_temp;
+	commit;
+	return (ip_temp);
+
+ when others
+  then return('Oracle Exception');
+
+END;
+/
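+
+-- Illustrative call (example values only; assumes a pool named 'Dynamic'
+-- and matching rows exist in radippool):
+--
+-- SELECT msqlippool('bob', 'Dynamic') FROM dual;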
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/queries.conf
new file mode 100644
index 0000000..686f92a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/queries.conf
@@ -0,0 +1,162 @@
+# -*- text -*-
+#
+#  ippool/oracle/queries.conf -- Oracle queries for rlm_sqlippool
+#
+#  $Id: 06d37f8985f3da1ac36276bdc9ca9c15a42d4059 $
+
+allocate_begin = "commit"
+start_begin = "commit"
+alive_begin = "commit"
+stop_begin = "commit"
+on_begin = "commit"
+off_begin = "commit"
+
+#
+#  This query allocates an IP address from the Pool
+#  The ORDER BY clause of this query tries to allocate the same IP-address
+#  to the user that they had last session...
+#
+allocate_find = "\
+	SELECT framedipaddress \
+	FROM ${ippool_table} \
+	WHERE pool_name = '%{control:Pool-Name}' \
+	AND expiry_time < current_timestamp \
+	AND rownum <= 1 \
+	ORDER BY \
+		(username <> '%{SQL-User-Name}'), \
+		(callingstationid <> '%{Calling-Station-Id}'), \
+		expiry_time \
+	FOR UPDATE"
+
+#
+#  This function is available if you want to use multiple pools
+#
+#allocate_find = "\
+#	SELECT msqlippool('%{SQL-User-Name}','%{control:Pool-Name}') \
+#	FROM dual"
+
+#
+#  If you prefer to allocate a random IP address every time, use this query instead
+#
+#allocate_find = "\
+#	SELECT framedipaddress \
+#	FROM ${ippool_table} \
+#	WHERE pool_name = '%{control:Pool-Name}' \
+#	AND expiry_time < current_timestamp \
+#	AND rownum <= 1 \
+#	ORDER BY RANDOM() \
+#	FOR UPDATE"
+
+#
+#  If an IP could not be allocated, check to see whether the pool exists or not
+#  This allows the module to differentiate between a full pool and no pool
+#  Note: If you are not running redundant pool modules this query may be commented
+#  out to save running this query every time an ip is not allocated.
+#
+pool_check = "\
+	SELECT id \
+	FROM (\
+		SELECT id \
+		FROM ${ippool_table} \
+		WHERE pool_name='%{control:Pool-Name}'\
+	) \
+	WHERE ROWNUM = 1"
+
+#
+#  This query marks the IP address handed out by "allocate-find" as used
+#  for the period of "lease_duration" after which time it may be reused.
+#
+allocate_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '%{NAS-IP-Address}', \
+		pool_key = '${pool_key}', \
+		callingstationid = '%{Calling-Station-Id}', \
+		username = '%{SQL-User-Name}', \
+		expiry_time = current_timestamp + INTERVAL '${lease_duration}' second(1) \
+	WHERE framedipaddress = '%I'"
+
+#
+#  This query frees the IP address assigned to "pool_key" when a new request
+#  comes in for the same "pool_key". This means that either you are losing
+#  accounting Stop records or you use Calling-Station-Id instead of NAS-Port
+#  as your "pool_key" and your users are able to reconnect before your NAS
+#  has timed out their previous session. (Generally on wireless networks)
+#  (Note: If your pool_key is set to Calling-Station-Id and not NAS-Port
+#  then you may wish to delete the "AND nasipaddress = '%{Nas-IP-Address}'
+#  from the WHERE clause)
+#
+allocate_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		expiry_time = current_timestamp - INTERVAL '1' second(1) \
+	WHERE pool_key = '${pool_key}'"
+
+#
+#  This query extends an IP address lease by "lease_duration" when an accounting
+#  START record arrives
+#
+start_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = current_timestamp + INTERVAL '${lease_duration}' second(1) \
+	WHERE nasipaddress = '%{NAS-IP-Address}' \
+	AND pool_key = '${pool_key}'"
+
+#
+#  This query frees an IP address when an accounting STOP record arrives
+#
+stop_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		expiry_time = current_timestamp - INTERVAL '1' second(1) \
+	WHERE nasipaddress = '%{Nas-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{SQL-User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}'"
+
+#
+#  This query extends an IP address lease by "lease_duration" when an accounting
+#  ALIVE record arrives
+#
+alive_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = current_timestamp + INTERVAL '${lease_duration}' second(1) \
+	WHERE nasipaddress = '%{Nas-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND framedipaddress = '%{Framed-IP-Address}' \
+	AND username = '%{SQL-User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}'"
+
+#
+#  This query frees all IP addresses allocated to a NAS when an
+#  accounting ON record arrives from that NAS
+#
+on_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		expiry_time = current_timestamp - INTERVAL '1' second(1) \
+	WHERE nasipaddress = '%{Nas-IP-Address}'"
+
+#
+#  This query frees all IP addresses allocated to a NAS when an
+#  accounting OFF record arrives from that NAS
+#
+off_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		expiry_time = current_timestamp - INTERVAL '1' second(1) \
+	WHERE nasipaddress = '%{Nas-IP-Address}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/schema.sql
new file mode 100644
index 0000000..c85c293
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/oracle/schema.sql
@@ -0,0 +1,28 @@
+CREATE TABLE radippool (
+	id                      INT PRIMARY KEY,
+	pool_name               VARCHAR(30) NOT NULL,
+	framedipaddress         VARCHAR(30) NOT NULL,
+	nasipaddress            VARCHAR(30) NOT NULL,
+	pool_key                INT NOT NULL,
+	CalledStationId         VARCHAR(64),
+	CallingStationId        VARCHAR(64) NOT NULL,
+	expiry_time             timestamp(0) NOT NULL,
+	username                VARCHAR(100)
+);
+
+CREATE INDEX radippool_poolname_ipaadr ON radippool (pool_name, framedipaddress);
+CREATE INDEX radippool_poolname_expire ON radippool (pool_name, expiry_time);
+CREATE INDEX radippool_nasipaddr_key ON radippool (nasipaddress, pool_key);
+CREATE INDEX radippool_nasipaddr_calling ON radippool (nasipaddress, callingstationid);
+
+CREATE SEQUENCE radippool_seq START WITH 1 INCREMENT BY 1;
+
+CREATE OR REPLACE TRIGGER radippool_serialnumber
+	BEFORE INSERT OR UPDATE OF id ON radippool
+	FOR EACH ROW
+	BEGIN
+		if ( :new.id = 0 or :new.id is null ) then
+			SELECT radippool_seq.nextval into :new.id from dual;
+		end if;
+	END;
+/
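+
+-- Illustrative note (not part of the stock schema): because of the trigger
+-- above, a row inserted with id = 0 (or with id omitted) is assigned the next
+-- value from radippool_seq automatically.  The pool name and addresses below
+-- are hypothetical:
+--
+--   INSERT INTO radippool (id, pool_name, framedipaddress, nasipaddress,
+--                          pool_key, callingstationid, expiry_time)
+--   VALUES (0, 'main_pool', '172.16.0.10', '0.0.0.0', 0, '-', CURRENT_TIMESTAMP);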
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/postgresql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/postgresql/queries.conf
new file mode 100644
index 0000000..d286cf6
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/postgresql/queries.conf
@@ -0,0 +1,146 @@
+# -*- text -*-
+#
+#  ippool/postgresql/queries.conf -- PostgreSQL queries for rlm_sqlippool
+#
+#  $Id: 38465e829f61efab50f565dc349ef64b29052f21 $
+
+#
+#  This query allocates an IP address from the Pool
+#  The ORDER BY clause tries to allocate the same IP address that the user
+#  had in their last session (matching rows sort first, because a false
+#  comparison sorts before a true one).
+#
+allocate_find = "\
+	SELECT framedipaddress \
+	FROM ${ippool_table} \
+	WHERE pool_name = '%{control:Pool-Name}' \
+	AND expiry_time < 'now'::timestamp(0) \
+	ORDER BY \
+		(username <> '%{SQL-User-Name}'), \
+		(callingstationid <> '%{Calling-Station-Id}'), \
+		expiry_time \
+	LIMIT 1 \
+	FOR UPDATE"
+
+#
+#  If you prefer to allocate a random IP address every time, use this query instead
+#
+#allocate_find = "\
+#	SELECT framedipaddress FROM ${ippool_table} \
+#	WHERE pool_name = '%{control:Pool-Name}' AND expiry_time < 'now'::timestamp(0) \
+#	ORDER BY RANDOM() \
+#	LIMIT 1 \
+#	FOR UPDATE"
+
+#
+#  If an IP could not be allocated, check to see whether the pool exists or not
+#  This allows the module to differentiate between a full pool and no pool
+#  Note: if you are not running redundant pool modules, this query may be
+#  commented out to avoid running it every time an IP is not allocated.
+#
+pool_check = "\
+	SELECT id \
+	FROM ${ippool_table} \
+	WHERE pool_name='%{control:Pool-Name}' \
+	LIMIT 1"
+
+#
+#  This query marks the IP address handed out by "allocate-find" as used
+#  for the period of "lease_duration" after which time it may be reused.
+#
+allocate_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '%{NAS-IP-Address}', \
+		pool_key = '${pool_key}', \
+		callingstationid = '%{Calling-Station-Id}', \
+		username = '%{SQL-User-Name}', \
+		expiry_time = 'now'::timestamp(0) + '${lease_duration} second'::interval \
+	WHERE framedipaddress = '%I'"
+
+#
+#  This query frees the IP address assigned to "pool_key" when a new request
+#  comes in for the same "pool_key". This means that either you are losing
+#  accounting Stop records or you use Calling-Station-Id instead of NAS-Port
+#  as your "pool_key" and your users are able to reconnect before your NAS
+#  has timed out their previous session. (Generally on wireless networks)
+#  (Note: If your pool_key is set to Calling-Station-Id and not NAS-Port
+#  then you may wish to delete the "AND nasipaddress = '%{Nas-IP-Address}'"
+#  condition from the WHERE clause)
+#
+allocate_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		expiry_time = 'now'::timestamp(0) - '1 second'::interval \
+	WHERE nasipaddress = '%{NAS-IP-Address}' \
+	AND pool_key = '${pool_key}'"
+
+#
+#  This query extends an IP address lease by "lease_duration" when an accounting
+#  START record arrives
+#
+start_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = 'now'::timestamp(0) + '${lease_duration} second'::interval \
+	WHERE nasipaddress = '%{NAS-IP-Address}' \
+	AND pool_key = '${pool_key}'"
+
+#
+#  This query frees an IP address when an accounting
+#  STOP record arrives
+#
+stop_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		expiry_time = 'now'::timestamp(0) - '1 second'::interval \
+	WHERE nasipaddress = '%{Nas-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{SQL-User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This query extends an IP address lease by "lease_duration" when an accounting
+#  ALIVE record arrives
+#
+alive_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = 'now'::timestamp(0) + '${lease_duration} seconds'::interval \
+	WHERE nasipaddress = '%{Nas-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND framedipaddress = '%{Framed-IP-Address}' \
+	AND username = '%{SQL-User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}'"
+
+#
+#  This query frees all IP addresses allocated to a NAS when an
+#  accounting ON record arrives from that NAS
+#
+on_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		expiry_time = 'now'::timestamp(0) - '1 second'::interval \
+	WHERE nasipaddress = '%{Nas-IP-Address}'"
+
+#
+#  This query frees all IP addresses allocated to a NAS when an
+#  accounting OFF record arrives from that NAS
+#
+off_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		expiry_time = 'now'::timestamp(0) - '1 second'::interval \
+	WHERE nasipaddress = '%{Nas-IP-Address}'"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/postgresql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/postgresql/schema.sql
new file mode 100644
index 0000000..9328f38
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/postgresql/schema.sql
@@ -0,0 +1,19 @@
+--
+-- Table structure for table 'radippool'
+--
+
+CREATE TABLE radippool (
+	id			BIGSERIAL PRIMARY KEY,
+	pool_name		varchar(64) NOT NULL,
+	FramedIPAddress		INET NOT NULL,
+	NASIPAddress		VARCHAR(16) NOT NULL default '',
+	pool_key		VARCHAR(64) NOT NULL default 0,
+	CalledStationId		VARCHAR(64),
+	CallingStationId	text NOT NULL default ''::text,
+	expiry_time		TIMESTAMP(0) without time zone NOT NULL default 'now'::timestamp(0),
+	username		text DEFAULT ''::text
+);
+
+CREATE INDEX radippool_poolname_expire ON radippool USING btree (pool_name, expiry_time);
+CREATE INDEX radippool_framedipaddress ON radippool USING btree (framedipaddress);
+CREATE INDEX radippool_nasip_poolkey_ipaddress ON radippool USING btree (nasipaddress, pool_key, framedipaddress);
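+
+-- Illustrative seeding example (not part of the stock schema): the
+-- allocate_find query in ippool/postgresql/queries.conf hands out rows from
+-- this table whose pool_name matches control:Pool-Name, so a pool is built by
+-- inserting one row per address.  The pool name and range are hypothetical:
+--
+--   INSERT INTO radippool (pool_name, framedipaddress)
+--   SELECT 'main_pool', ('172.16.0.' || i)::inet
+--   FROM generate_series(10, 20) AS i;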
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/sqlite/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/sqlite/queries.conf
new file mode 100644
index 0000000..fc8fa9f
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/sqlite/queries.conf
@@ -0,0 +1,162 @@
+# -*- text -*-
+#
+#  ippool/sqlite/queries.conf -- SQLite queries for rlm_sqlippool
+#
+#  $Id: e912bd32a7485f6a505dbb67ad6f54138845cdee $
+
+#
+#  This series of queries allocates an IP address
+#
+#allocate_clear = "\
+#	UPDATE ${ippool_table} \
+#	SET \
+#		nasipaddress = '', pool_key = 0, \
+#		callingstationid = '', username = '', \
+#		expiry_time = NULL \
+#	WHERE pool_key = '${pool_key}'"
+
+#
+#  This series of queries allocates an IP address
+#  (Note: If your pool_key is set to Calling-Station-Id and not NAS-Port
+#  then you may wish to delete the "AND nasipaddress = '%{Nas-IP-Address}'"
+#  condition from the WHERE clause)
+#
+allocate_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE expiry_time <= datetime(strftime('%%s', 'now') - 1, 'unixepoch') \
+	AND nasipaddress = '%{Nas-IP-Address}'"
+
+#
+#  The ORDER BY clause of this query tries to allocate the same IP address
+#  that the user had in their last session...
+#
+allocate_find = "\
+	SELECT framedipaddress \
+	FROM ${ippool_table} \
+	WHERE pool_name = '%{control:Pool-Name}' \
+	AND (expiry_time < datetime('now') OR expiry_time IS NULL) \
+	ORDER BY \
+		(username <> '%{User-Name}'), \
+		(callingstationid <> '%{Calling-Station-Id}'), \
+		expiry_time \
+	LIMIT 1 \
+	FOR UPDATE"
+
+#
+#   If you prefer to allocate a random IP address every time,
+#   use this query instead
+#
+
+#allocate_find = "\
+#	SELECT framedipaddress \
+#	FROM ${ippool_table} \
+# 	WHERE pool_name = '%{control:Pool-Name}' \
+#	AND expiry_time IS NULL \
+#	ORDER BY RANDOM() \
+# 	LIMIT 1 \
+#	FOR UPDATE"
+
+#
+#  If an IP could not be allocated, check to see if the pool exists or not
+#  This allows the module to differentiate between a full pool and no pool
+#  Note: if you are not running redundant pool modules, this query may be
+#  commented out to avoid running it every time an IP is not allocated.
+#
+pool_check = "\
+	SELECT id \
+	FROM ${ippool_table} \
+	WHERE pool_name='%{control:Pool-Name}' \
+	LIMIT 1"
+
+#
+#  This is the final IP Allocation query, which saves the allocated ip details
+#
+allocate_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '%{NAS-IP-Address}', \
+		pool_key = '${pool_key}', \
+		callingstationid = '%{Calling-Station-Id}', \
+		username = '%{User-Name}', \
+		expiry_time = datetime(strftime('%%s', 'now') + ${lease_duration}, 'unixepoch') \
+	WHERE framedipaddress = '%I' \
+	AND expiry_time IS NULL"
+
+#
+#  This query extends an IP address lease by "lease_duration" when an accounting START record arrives
+#
+start_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = datetime(strftime('%%s', 'now') + ${lease_duration}, 'unixepoch') \
+	WHERE nasipaddress = '%{NAS-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This series of queries frees an IP number when an accounting STOP record arrives
+#
+stop_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE nasipaddress = '%{Nas-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This query extends an IP address lease by "lease_duration" when an
+#  accounting ALIVE (interim update) record arrives
+#
+alive_update = "\
+	UPDATE ${ippool_table} \
+	SET \
+		expiry_time = datetime(strftime('%%s', 'now') + ${lease_duration}, 'unixepoch') \
+	WHERE nasipaddress = '%{Nas-IP-Address}' \
+	AND pool_key = '${pool_key}' \
+	AND username = '%{User-Name}' \
+	AND callingstationid = '%{Calling-Station-Id}' \
+	AND framedipaddress = '%{Framed-IP-Address}'"
+
+#
+#  This series of queries frees the IP numbers allocated to a
+#  NAS when an accounting ON record arrives
+#
+on_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE nasipaddress = '%{Nas-IP-Address}'"
+
+#
+#  This series of queries frees the IP numbers allocated to a
+#  NAS when an accounting OFF record arrives
+#
+off_clear = "\
+	UPDATE ${ippool_table} \
+	SET \
+		nasipaddress = '', \
+		pool_key = 0, \
+		callingstationid = '', \
+		username = '', \
+		expiry_time = NULL \
+	WHERE nasipaddress = '%{Nas-IP-Address}'"
+
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/sqlite/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/sqlite/schema.sql
new file mode 100644
index 0000000..8ac9fdf
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/ippool/sqlite/schema.sql
@@ -0,0 +1,18 @@
+--
+-- Table structure for table 'radippool'
+--
+CREATE TABLE radippool (
+  id                    int(11) PRIMARY KEY,
+  pool_name             varchar(30) NOT NULL,
+  framedipaddress       varchar(15) NOT NULL default '',
+  nasipaddress          varchar(15) NOT NULL default '',
+  calledstationid       VARCHAR(30) NOT NULL,
+  callingstationid      VARCHAR(30) NOT NULL,
+  expiry_time           DATETIME NULL default NULL,
+  username              varchar(64) NOT NULL default '',
+  pool_key              varchar(30) NOT NULL
+);
+
+CREATE INDEX radippool_poolname_expire ON radippool(pool_name, expiry_time);
+CREATE INDEX radippool_framedipaddress ON radippool(framedipaddress);
+CREATE INDEX radippool_nasip_poolkey_ipaddress ON radippool(nasipaddress, pool_key, framedipaddress);
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mssql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mssql/queries.conf
new file mode 100644
index 0000000..9223d01
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mssql/queries.conf
@@ -0,0 +1,270 @@
+# -*- text -*-
+#
+#  main/mssql/queries.conf -- MSSQL configuration for default schema (schema.sql)
+#
+#  $Id: 6fcea6edb5998f9f6c302f6246a88cdddf83dbaa $
+
+# Safe characters list for sql queries. Everything else is replaced
+# with their mime-encoded equivalents.
+# The default list should be ok
+#safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
+
+#######################################################################
+#  Query config:  Username
+#######################################################################
+# This is the username that will get substituted, escaped, and added
+# as attribute 'SQL-User-Name'.  '%{SQL-User-Name}' should be used
+# below everywhere a username substitution is needed so you can
+# be sure the username passed from the client is escaped properly.
+#
+# Uncomment the next line, if you want the sql_user_name to mean:
+#
+#    Use Stripped-User-Name, if it's there.
+#    Else use User-Name, if it's there,
+#    Else use hard-coded string "none" as the user name.
+#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-none}}"
+#
+sql_user_name = "%{User-Name}"
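+
+# Illustrative example (not part of the stock file): the commented-out
+# alternative above nests two conditional expansions that fall back from
+# left to right:
+#
+#   %{%{Stripped-User-Name}:-%{%{User-Name}:-none}}
+#     Stripped-User-Name = "bob"          ->  SQL-User-Name = "bob"
+#     only User-Name = "bob@example.com"  ->  SQL-User-Name = "bob@example.com"
+#     neither attribute present           ->  SQL-User-Name = "none"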
+
+#######################################################################
+#  Authorization Queries
+#######################################################################
+#  These queries compare the check items for the user
+#  in ${authcheck_table} and setup the reply items in
+#  ${authreply_table}.  You can use any query/tables
+#  you want, but the return data for each row MUST
+#  be in the  following order:
+#
+#  0. Row ID (currently unused)
+#  1. UserName/GroupName
+#  2. Item Attr Name
+#  3. Item Attr Value
+#  4. Item Attr Operation
+#######################################################################
+# The query for case-sensitive usernames was removed. Please get in touch
+# if you know of an equivalent of the STRCMP function for MS SQL.
+
+authorize_check_query = "\
+	SELECT id, UserName, Attribute, Value, op \
+	FROM ${authcheck_table} \
+	WHERE Username = '%{SQL-User-Name}' \
+	ORDER BY id"
+
+authorize_reply_query = "\
+	SELECT id, UserName, Attribute, Value, op \
+	FROM ${authreply_table} \
+	WHERE Username = '%{SQL-User-Name}' \
+	ORDER BY id"
+
+authorize_group_check_query = "\
+	SELECT \
+		${groupcheck_table}.id,${groupcheck_table}.GroupName, \
+		${groupcheck_table}.Attribute,${groupcheck_table}.Value, \
+		${groupcheck_table}.op \
+	FROM ${groupcheck_table},${usergroup_table} \
+	WHERE ${usergroup_table}.Username = '%{SQL-User-Name}' \
+	AND ${usergroup_table}.GroupName = ${groupcheck_table}.GroupName \
+	ORDER BY ${groupcheck_table}.id"
+
+authorize_group_reply_query = "\
+	SELECT \
+		${groupreply_table}.id, ${groupreply_table}.GroupName, \
+		${groupreply_table}.Attribute,${groupreply_table}.Value, \
+		${groupreply_table}.op \
+	FROM ${groupreply_table},${usergroup_table} \
+	WHERE ${usergroup_table}.Username = '%{SQL-User-Name}' \
+	AND ${usergroup_table}.GroupName = ${groupreply_table}.GroupName \
+	ORDER BY ${groupreply_table}.id"
+
+group_membership_query = "\
+	SELECT groupname \
+	FROM ${usergroup_table} \
+	WHERE username = '%{SQL-User-Name}' \
+	ORDER BY priority"
+
+#######################################################################
+# Accounting and Post-Auth Queries
+#######################################################################
+# These queries insert/update accounting and authentication records.
+# The query to use is determined by the value of 'reference'.
+# This value is used as a configuration path and should resolve to one
+# or more 'query's. If reference points to multiple queries, and a query
+# fails, the next query is executed.
+#
+# Behaviour is identical to the old 1.x/2.x module, except we can now
+# fail between N queries, and query selection can be based on any
+# combination of attributes, or custom 'Acct-Status-Type' values.
+#######################################################################
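+# Illustrative example (not part of the stock file): with the "reference"
+# expansion below, an Accounting-Request with Acct-Status-Type = Start
+# resolves to the configuration path "type.start.query", Interim-Update to
+# "type.interim-update.query" and Accounting-On to "type.accounting-on.query"
+# (the tolower expansion lower-cases the attribute value); the queries found
+# at that path are then tried in order until one succeeds.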
+accounting {
+	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
+
+	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
+	# when used with the rlm_sql_null driver.
+#	logfile = ${logdir}/accounting.sql
+
+	type {
+		accounting-on {
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					AcctStopTime='%S', \
+					AcctSessionTime=unix_timestamp('%S') - \
+						unix_timestamp(AcctStartTime), \
+					AcctTerminateCause='%{%{Acct-Terminate-Cause}:-NAS-Reboot}', \
+					AcctStopDelay = %{%{Acct-Delay-Time}:-0} \
+				WHERE AcctStopTime = 0 \
+				AND NASIPAddress = '%{NAS-IP-Address}' \
+				AND AcctStartTime <= '%S'"
+		}
+
+		accounting-off {
+			query = "${..accounting-on.query}"
+		}
+
+		start {
+			query = "\
+				INSERT INTO ${....acct_table1} \
+					(AcctSessionId,		AcctUniqueId,		UserName, \
+					Realm,			NASIPAddress,		NASPort, \
+					NASPortType,		AcctStartTime, 		AcctSessionTime, \
+					AcctAuthentic,		ConnectInfo_start,	ConnectInfo_stop, \
+					AcctInputOctets,	AcctOutputOctets,	CalledStationId, \
+					CallingStationId,	AcctTerminateCause,	ServiceType, \
+					FramedProtocol,		FramedIPAddress,	AcctStartDelay, \
+					AcctStopDelay,		XAscendSessionSvrKey) \
+				VALUES(\
+					'%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					'%{Realm}', \
+					'%{NAS-IP-Address}', \
+					'%{NAS-Port-Id}', \
+					'%{NAS-Port-Type}', \
+					'%S', \
+					'0', \
+					'%{Acct-Authentic}', \
+					'%{Connect-Info}', \
+					'', \
+					'0', \
+					'0', \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					'%{Framed-IP-Address}', \
+					'%{Acct-Delay-Time}', \
+					'0', \
+					'%{X-Ascend-Session-Svr-Key}')"
+
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					AcctStartTime = '%S', \
+					AcctStartDelay = '%{%{Acct-Delay-Time}:-0}', \
+					ConnectInfo_start = '%{Connect-Info}' \
+				WHERE AcctSessionId = '%{Acct-Session-Id}' \
+				AND UserName = '%{SQL-User-Name}' \
+				AND NASIPAddress = '%{NAS-IP-Address}' \
+				AND AcctStopTime = 0"
+		}
+
+		interim-update {
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					FramedIPAddress = '%{Framed-IP-Address}' \
+				WHERE AcctSessionId = '%{Acct-Session-Id}' \
+				AND UserName = '%{SQL-User-Name}' \
+				AND NASIPAddress= '%{NAS-IP-Address}' \
+				AND AcctStopTime = 0"
+
+			query = "\
+				INSERT INTO ${....acct_table1} \
+					(AcctSessionId,		AcctUniqueId,		UserName, \
+					Realm,			NASIPAddress,		NASPort, \
+					NASPortType,		AcctSessionTime,	AcctAuthentic, \
+					ConnectInfo_start,	AcctInputOctets,	AcctOutputOctets, \
+					CalledStationId,	CallingStationId,	ServiceType, \
+					FramedProtocol,		FramedIPAddress,	AcctStartDelay, \
+					XAscendSessionSvrKey) \
+				VALUES(\
+					'%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					'%{Realm}', \
+					'%{NAS-IP-Address}', \
+					'%{NAS-Port-Id}', \
+					'%{NAS-Port-Type}', \
+					'%{Acct-Session-Time}', \
+					'%{Acct-Authentic}', \
+					'', \
+					'%{Acct-Input-Octets}', \
+					'%{Acct-Output-Octets}', \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					'%{Framed-IP-Address}', \
+					'0', \
+					'%{X-Ascend-Session-Svr-Key}')"
+		}
+
+		stop {
+			query = "\
+				UPDATE ${....acct_table2} \
+				SET \
+					AcctStopTime = '%S', \
+					AcctSessionTime = '%{Acct-Session-Time}', \
+					AcctInputOctets = '%{Acct-Input-Octets}', \
+					AcctOutputOctets = '%{Acct-Output-Octets}', \
+					AcctTerminateCause = '%{Acct-Terminate-Cause}', \
+					AcctStopDelay = '%{%{Acct-Delay-Time}:-0}', \
+					ConnectInfo_stop = '%{Connect-Info}' \
+				WHERE AcctSessionId = '%{Acct-Session-Id}' \
+				AND UserName = '%{SQL-User-Name}' \
+				AND NASIPAddress = '%{NAS-IP-Address}' \
+				AND AcctStopTime = 0"
+
+			query = "\
+				INSERT into ${....acct_table2} \
+					(AcctSessionId,		AcctUniqueId,		UserName, \
+					Realm,			NASIPAddress,		NASPort, \
+					NASPortType,		AcctStopTime,		AcctSessionTime, \
+					AcctAuthentic,		ConnectInfo_start,	ConnectInfo_stop, \
+					AcctInputOctets,	AcctOutputOctets,	CalledStationId, \
+					CallingStationId,	AcctTerminateCause,	ServiceType, \
+					FramedProtocol,		FramedIPAddress,	AcctStartDelay, \
+					AcctStopDelay) \
+				VALUES(\
+					'%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					'%{Realm}', \
+					'%{NAS-IP-Address}', \
+					'%{NAS-Port-Id}', \
+					'%{NAS-Port-Type}', \
+					'%S', \
+					'%{Acct-Session-Time}', \
+					'%{Acct-Authentic}', \
+					'', \
+					'%{Connect-Info}', \
+					'%{Acct-Input-Octets}', \
+					'%{Acct-Output-Octets}', \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'%{Acct-Terminate-Cause}', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					'%{Framed-IP-Address}', \
+					'0', \
+					'%{%{Acct-Delay-Time}:-0}')"
+		}
+	}
+}
+
+post-auth {
+	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
+	# when used with the rlm_sql_null driver.
+#	logfile = ${logdir}/post-auth.sql
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mssql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mssql/schema.sql
new file mode 100644
index 0000000..4062200
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mssql/schema.sql
@@ -0,0 +1,236 @@
+/***************************************************************************
+ * $Id: f89204918fc5951cb1920d5474563656ec9dee98 $		   *
+ *									   *
+ * db_mssql.sql                 					   *
+ *                                                                         *
+ * Database schema for MSSQL server					   *
+ *									   *
+ * To load:								   *
+ *  isql -S db_ip_addr -d db_name -U db_login -P db_passwd -i db_mssql.sql *
+ *									   *
+ * Based on: db_mysql.sql (Mike Machado <mike@innercite.com>)		   *
+ *									   *
+ *					Dmitri Ageev <d_ageev@ortcc.ru>    *
+ ***************************************************************************/
+
+/****** Object:  Table [radacct]    Script Date: 26.03.02 16:55:17 ******/
+CREATE TABLE [radacct] (
+	[RadAcctId] [numeric](21, 0) IDENTITY (1, 1) NOT NULL ,
+	[AcctSessionId] [varchar] (64) NOT NULL ,
+	[AcctUniqueId] [varchar] (32) NOT NULL ,
+	[UserName] [varchar] (64) NOT NULL ,
+	[GroupName] [varchar] (64) NOT NULL ,
+	[Realm] [varchar] (64) NOT NULL ,
+	[NASIPAddress] [varchar] (15) NOT NULL ,
+	[NASPortId] [varchar] (15) NULL ,
+	[NASPortType] [varchar] (32) NULL ,
+	[AcctStartTime] [datetime] NOT NULL ,
+	[AcctStopTime] [datetime] NOT NULL ,
+	[AcctSessionTime] [bigint] NULL ,
+	[AcctAuthentic] [varchar] (32) NULL ,
+	[ConnectInfo_start] [varchar] (32) NULL ,
+	[ConnectInfo_stop] [varchar] (32) NULL ,
+	[AcctInputOctets] [bigint] NULL ,
+	[AcctOutputOctets] [bigint] NULL ,
+	[CalledStationId] [varchar] (30) NOT NULL ,
+	[CallingStationId] [varchar] (30) NOT NULL ,
+	[AcctTerminateCause] [varchar] (32) NOT NULL ,
+	[ServiceType] [varchar] (32) NULL ,
+	[FramedProtocol] [varchar] (32) NULL ,
+	[FramedIPAddress] [varchar] (15) NOT NULL ,
+	[XAscendSessionSvrKey] [varchar] (10) NULL ,
+	[AcctStartDelay] [int] NULL ,
+	[AcctStopDelay] [int] NULL
+) ON [PRIMARY]
+GO
+
+/****** Object:  Table [radcheck]    Script Date: 26.03.02 16:55:17 ******/
+CREATE TABLE [radcheck] (
+	[id] [int] IDENTITY (1, 1) NOT NULL ,
+	[UserName] [varchar] (64) NOT NULL ,
+	[Attribute] [varchar] (32) NOT NULL ,
+	[Value] [varchar] (253) NOT NULL ,
+	[op] [char] (2) NULL
+) ON [PRIMARY]
+GO
+
+/****** Object:  Table [radgroupcheck]    Script Date: 26.03.02 16:55:17 ******/
+CREATE TABLE [radgroupcheck] (
+	[id] [int] IDENTITY (1, 1) NOT NULL ,
+	[GroupName] [varchar] (64) NOT NULL ,
+	[Attribute] [varchar] (32) NOT NULL ,
+	[Value] [varchar] (253) NOT NULL ,
+	[op] [char] (2) NULL
+) ON [PRIMARY]
+GO
+
+/****** Object:  Table [radgroupreply]    Script Date: 26.03.02 16:55:17 ******/
+CREATE TABLE [radgroupreply] (
+	[id] [int] IDENTITY (1, 1) NOT NULL ,
+	[GroupName] [varchar] (64) NOT NULL ,
+	[Attribute] [varchar] (32) NOT NULL ,
+	[Value] [varchar] (253) NOT NULL ,
+	[op] [char] (2) NULL ,
+	[prio] [int] NOT NULL
+) ON [PRIMARY]
+GO
+
+/****** Object:  Table [radreply]    Script Date: 26.03.02 16:55:18 ******/
+CREATE TABLE [radreply] (
+	[id] [int] IDENTITY (1, 1) NOT NULL ,
+	[UserName] [varchar] (64) NOT NULL ,
+	[Attribute] [varchar] (32) NOT NULL ,
+	[Value] [varchar] (253) NOT NULL ,
+	[op] [char] (2) NULL
+) ON [PRIMARY]
+GO
+
+/****** Object:  Table [radusergroup]    Script Date: 26.03.02 16:55:18 ******/
+CREATE TABLE [radusergroup] (
+	[id] [int] IDENTITY (1, 1) NOT NULL ,
+	[UserName] [varchar] (64) NOT NULL ,
+	[GroupName] [varchar] (64) NULL
+) ON [PRIMARY]
+GO
+
+/****** Object:  Table [radpostauth]    Script Date: 16.04.08 19:44:11 ******/
+CREATE TABLE [radpostauth] (
+	[id] [int] IDENTITY (1, 1) NOT NULL ,
+	[userName] [varchar] (64) NOT NULL ,
+	[pass] [varchar] (64) NOT NULL ,
+	[reply] [varchar] (32) NOT NULL ,
+	[authdate] [datetime] NOT NULL
+)
+GO
+
+ALTER TABLE [radacct] WITH NOCHECK ADD
+	CONSTRAINT [DF_radacct_GroupName] DEFAULT ('') FOR [GroupName],
+	CONSTRAINT [DF_radacct_AcctSessionId] DEFAULT ('') FOR [AcctSessionId],
+	CONSTRAINT [DF_radacct_AcctUniqueId] DEFAULT ('') FOR [AcctUniqueId],
+	CONSTRAINT [DF_radacct_UserName] DEFAULT ('') FOR [UserName],
+	CONSTRAINT [DF_radacct_Realm] DEFAULT ('') FOR [Realm],
+	CONSTRAINT [DF_radacct_NASIPAddress] DEFAULT ('') FOR [NASIPAddress],
+	CONSTRAINT [DF_radacct_NASPortId] DEFAULT (null) FOR [NASPortId],
+	CONSTRAINT [DF_radacct_NASPortType] DEFAULT (null) FOR [NASPortType],
+	CONSTRAINT [DF_radacct_AcctStartTime] DEFAULT ('1900-01-01 00:00:00') FOR [AcctStartTime],
+	CONSTRAINT [DF_radacct_AcctStopTime] DEFAULT ('1900-01-01 00:00:00') FOR [AcctStopTime],
+	CONSTRAINT [DF_radacct_AcctSessionTime] DEFAULT (null) FOR [AcctSessionTime],
+	CONSTRAINT [DF_radacct_AcctAuthentic] DEFAULT (null) FOR [AcctAuthentic],
+	CONSTRAINT [DF_radacct_ConnectInfo_start] DEFAULT (null) FOR [ConnectInfo_start],
+	CONSTRAINT [DF_radacct_ConnectInfo_stop] DEFAULT (null) FOR [ConnectInfo_stop],
+	CONSTRAINT [DF_radacct_AcctInputOctets] DEFAULT (null) FOR [AcctInputOctets],
+	CONSTRAINT [DF_radacct_AcctOutputOctets] DEFAULT (null) FOR [AcctOutputOctets],
+	CONSTRAINT [DF_radacct_CalledStationId] DEFAULT ('') FOR [CalledStationId],
+	CONSTRAINT [DF_radacct_CallingStationId] DEFAULT ('') FOR [CallingStationId],
+	CONSTRAINT [DF_radacct_AcctTerminateCause] DEFAULT ('') FOR [AcctTerminateCause],
+	CONSTRAINT [DF_radacct_ServiceType] DEFAULT (null) FOR [ServiceType],
+	CONSTRAINT [DF_radacct_FramedProtocol] DEFAULT (null) FOR [FramedProtocol],
+	CONSTRAINT [DF_radacct_FramedIPAddress] DEFAULT ('') FOR [FramedIPAddress],
+	CONSTRAINT [DF_radacct_AcctStartDelay] DEFAULT (null) FOR [AcctStartDelay],
+	CONSTRAINT [DF_radacct_AcctStopDelay] DEFAULT (null) FOR [AcctStopDelay],
+	CONSTRAINT [PK_radacct] PRIMARY KEY  NONCLUSTERED
+	(
+		[RadAcctId]
+	)  ON [PRIMARY]
+GO
+
+ALTER TABLE [radcheck] WITH NOCHECK ADD
+	CONSTRAINT [DF_radcheck_UserName] DEFAULT ('') FOR [UserName],
+	CONSTRAINT [DF_radcheck_Attribute] DEFAULT ('') FOR [Attribute],
+	CONSTRAINT [DF_radcheck_Value] DEFAULT ('') FOR [Value],
+	CONSTRAINT [DF_radcheck_op] DEFAULT (null) FOR [op],
+	CONSTRAINT [PK_radcheck] PRIMARY KEY  NONCLUSTERED
+	(
+		[id]
+	)  ON [PRIMARY]
+GO
+
+ALTER TABLE [radgroupcheck] WITH NOCHECK ADD
+	CONSTRAINT [DF_radgroupcheck_GroupName] DEFAULT ('') FOR [GroupName],
+	CONSTRAINT [DF_radgroupcheck_Attribute] DEFAULT ('') FOR [Attribute],
+	CONSTRAINT [DF_radgroupcheck_Value] DEFAULT ('') FOR [Value],
+	CONSTRAINT [DF_radgroupcheck_op] DEFAULT (null) FOR [op],
+	CONSTRAINT [PK_radgroupcheck] PRIMARY KEY  NONCLUSTERED
+	(
+		[id]
+	)  ON [PRIMARY]
+GO
+
+ALTER TABLE [radgroupreply] WITH NOCHECK ADD
+	CONSTRAINT [DF_radgroupreply_GroupName] DEFAULT ('') FOR [GroupName],
+	CONSTRAINT [DF_radgroupreply_Attribute] DEFAULT ('') FOR [Attribute],
+	CONSTRAINT [DF_radgroupreply_Value] DEFAULT ('') FOR [Value],
+	CONSTRAINT [DF_radgroupreply_op] DEFAULT (null) FOR [op],
+	CONSTRAINT [DF_radgroupreply_prio] DEFAULT (0) FOR [prio],
+	CONSTRAINT [PK_radgroupreply] PRIMARY KEY  NONCLUSTERED
+	(
+		[id]
+	)  ON [PRIMARY]
+GO
+
+ALTER TABLE [radreply] WITH NOCHECK ADD
+	CONSTRAINT [DF_radreply_UserName] DEFAULT ('') FOR [UserName],
+	CONSTRAINT [DF_radreply_Attribute] DEFAULT ('') FOR [Attribute],
+	CONSTRAINT [DF_radreply_Value] DEFAULT ('') FOR [Value],
+	CONSTRAINT [DF_radreply_op] DEFAULT (null) FOR [op],
+	CONSTRAINT [PK_radreply] PRIMARY KEY  NONCLUSTERED
+	(
+		[id]
+	)  ON [PRIMARY]
+GO
+
+ALTER TABLE [radusergroup] WITH NOCHECK ADD
+	CONSTRAINT [DF_radusergroup_UserName] DEFAULT ('') FOR [UserName],
+	CONSTRAINT [DF_radusergroup_GroupName] DEFAULT ('') FOR [GroupName],
+	CONSTRAINT [PK_radusergroup] PRIMARY KEY  NONCLUSTERED
+	(
+		[id]
+	)  ON [PRIMARY]
+GO
+
+ALTER TABLE [radpostauth] WITH NOCHECK ADD
+	CONSTRAINT [DF_radpostauth_userName] DEFAULT ('') FOR [userName],
+	CONSTRAINT [DF_radpostauth_pass] DEFAULT ('') FOR [pass],
+	CONSTRAINT [DF_radpostauth_reply] DEFAULT ('') FOR [reply],
+	CONSTRAINT [DF_radpostauth_authdate] DEFAULT (getdate()) FOR [authdate],
+	CONSTRAINT [PK_radpostauth] PRIMARY KEY NONCLUSTERED
+	(
+		[id]
+	) ON [PRIMARY]
+GO
+
+ CREATE  INDEX [UserName] ON [radacct]([UserName]) ON [PRIMARY]
+GO
+
+ CREATE  INDEX [FramedIPAddress] ON [radacct]([FramedIPAddress]) ON [PRIMARY]
+GO
+
+ CREATE  INDEX [AcctSessionId] ON [radacct]([AcctSessionId]) ON [PRIMARY]
+GO
+
+ CREATE  UNIQUE INDEX [AcctUniqueId] ON [radacct]([AcctUniqueId]) ON [PRIMARY]
+GO
+
+ CREATE  INDEX [AcctStartTime] ON [radacct]([AcctStartTime]) ON [PRIMARY]
+GO
+
+ CREATE  INDEX [AcctStopTime] ON [radacct]([AcctStopTime]) ON [PRIMARY]
+GO
+
+ CREATE  INDEX [NASIPAddress] ON [radacct]([NASIPAddress]) ON [PRIMARY]
+GO
+
+ CREATE  INDEX [UserName] ON [radcheck]([UserName]) ON [PRIMARY]
+GO
+
+ CREATE  INDEX [GroupName] ON [radgroupcheck]([GroupName]) ON [PRIMARY]
+GO
+
+ CREATE  INDEX [GroupName] ON [radgroupreply]([GroupName]) ON [PRIMARY]
+GO
+
+ CREATE  INDEX [UserName] ON [radreply]([UserName]) ON [PRIMARY]
+GO
+
+ CREATE  INDEX [UserName] ON [radusergroup]([UserName]) ON [PRIMARY]
+GO
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/extras/wimax/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/extras/wimax/queries.conf
new file mode 100644
index 0000000..4087cb5
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/extras/wimax/queries.conf
@@ -0,0 +1,40 @@
+# -*- text -*-
+##
+## wimax.conf -- MySQL configuration for WiMAX keying
+##
+##	$Id: 26942305017c59d4589d0645cfc79405b98b4c6a $
+
+# Safe characters list for sql queries. Everything else is replaced
+# with their mime-encoded equivalents.
+# The default list should be ok
+#safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
+
+#######################################################################
+#  Query config:  Username
+#######################################################################
+# This is the username that will get substituted, escaped, and added
+# as attribute 'SQL-User-Name'.  '%{SQL-User-Name}' should be used below
+# everywhere a username substitution is needed so you can be sure
+# the username passed from the client is escaped properly.
+#
+#  Uncomment the next line, if you want the sql_user_name to mean:
+#
+#    Use Stripped-User-Name, if it's there.
+#    Else use User-Name, if it's there,
+#    Else use hard-coded string "DEFAULT" as the user name.
+#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-DEFAULT}}"
+#
+sql_user_name = "%{User-Name}"
+
+#######################################################################
+# Logging of WiMAX SPI -> key mappings
+#######################################################################
+# postauth_query		- Insert some info after authentication
+#######################################################################
+
+postauth_query = "INSERT INTO wimax \
+		  (username, authdate, spi, mipkey, lifetime) \
+		  VALUES ( \
+		  '%{User-Name}', '%S', \
+		  '%{%{reply:WiMAX-MN-hHA-MIP4-SPI}:-%{reply:WiMAX-MN-hHA-MIP6-SPI}}', \
+		  '%{%{reply:WiMAX-MN-hHA-MIP4-Key}:-%{reply:WiMAX-MN-hHA-MIP6-Key}}', '%{%{reply:Session-Timeout}:-86400}' )"
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/extras/wimax/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/extras/wimax/schema.sql
new file mode 100644
index 0000000..e32224a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/extras/wimax/schema.sql
@@ -0,0 +1,16 @@
+#
+# WiMAX Table structure for table 'wimax',
+# which replaces the "radpostauth" table.
+#
+
+CREATE TABLE wimax (
+  id int(11) NOT NULL auto_increment,
+  username varchar(64) NOT NULL default '',
+  authdate timestamp NOT NULL,
+  spi varchar(16) NOT NULL default '',
+  mipkey varchar(400) NOT NULL default '',
+  lifetime int(12) default NULL,
+  PRIMARY KEY  (id),
+  KEY username (username),
+  KEY spi (spi)
+) ;
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/queries.conf
new file mode 100644
index 0000000..60c0f27
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/queries.conf
@@ -0,0 +1,414 @@
+# -*- text -*-
+#
+#  main/mysql/queries.conf-- MySQL configuration for default schema (schema.sql)
+#
+#  $Id: 0b3c210d6c0b04350d1a48738764b47f25f51bc4 $
+
+# Safe characters list for sql queries. Everything else is replaced
+# with their mime-encoded equivalents.
+# The default list should be ok
+#safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
+
+#######################################################################
+#  Connection config
+#######################################################################
+# The character set is not configurable. The default character set of
+# the mysql client library is used. To control the character set,
+# create/edit my.cnf (typically in /etc/mysql/my.cnf or /etc/my.cnf)
+# and enter
+# [client]
+# default-character-set = utf8
+#
+
+#######################################################################
+#  Query config:  Username
+#######################################################################
+# This is the username that will get substituted, escaped, and added
+# as attribute 'SQL-User-Name'. '%{SQL-User-Name}' should be used below
+# everywhere a username substitution is needed so you can be sure
+# the username passed from the client is escaped properly.
+#
+# Uncomment the next line, if you want the sql_user_name to mean:
+#
+#	Use Stripped-User-Name, if it's there.
+#	Else use User-Name, if it's there,
+#	Else use hard-coded string "DEFAULT" as the user name.
+#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-DEFAULT}}"
+#
+sql_user_name = "%{User-Name}"
+
+#######################################################################
+# Default profile
+#######################################################################
+# This is the default profile. It is found in SQL by group membership.
+# That means that this profile must be a member of at least one group
+# which will contain the corresponding check and reply items.
+# This profile will be queried in the authorize section for every user.
+# The point is to assign all users a default profile without having to
+# manually add each one to a group that will contain the profile.
+# The SQL module will also honor the User-Profile attribute. This
+# attribute can be set anywhere in the authorize section (ie the users
+# file). It is found exactly as the default profile is found.
+# If it is set then it will *overwrite* the default profile setting.
+# The idea is to select profiles based on checks on the incoming packets,
+# not on user group membership. For example:
+# -- users file --
+# DEFAULT	Service-Type == Outbound-User, User-Profile := "outbound"
+# DEFAULT	Service-Type == Framed-User, User-Profile := "framed"
+#
+# By default the default_user_profile is not set
+#
+#default_user_profile = "DEFAULT"
+
+#######################################################################
+# NAS Query
+#######################################################################
+# This query retrieves the radius clients
+#
+# 0. Row ID (currently unused)
+# 1. Name (or IP address)
+# 2. Shortname
+# 3. Type
+# 4. Secret
+# 5. Server
+#######################################################################
+
+client_query = "\
+	SELECT id, nasname, shortname, type, secret, server \
+	FROM ${client_table}"
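+
+# Illustrative example (not part of the stock file): with the default
+# client_table ("nas", as created by schema.sql in this directory), a RADIUS
+# client is defined by one row in that table.  The values below are
+# hypothetical:
+#
+#   INSERT INTO nas (nasname, shortname, type, secret)
+#   VALUES ('192.0.2.10', 'test-switch', 'other', 'testing123');
+#
+# The table is only read at server startup (and only when the sql module is
+# configured to read clients), so new rows do not take effect immediately.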
+
+#######################################################################
+# Authorization Queries
+#######################################################################
+# These queries compare the check items for the user
+# in ${authcheck_table} and setup the reply items in
+# ${authreply_table}. You can use any query/tables
+# you want, but the return data for each row MUST
+# be in the following order:
+#
+# 0. Row ID (currently unused)
+# 1. UserName/GroupName
+# 2. Item Attr Name
+# 3. Item Attr Value
+# 4. Item Attr Operation
+#######################################################################
+# Use these for case sensitive usernames.
+
+#authorize_check_query = "\
+#	SELECT id, username, attribute, value, op \
+#	FROM ${authcheck_table} \
+#	WHERE username = BINARY '%{SQL-User-Name}' \
+#	ORDER BY id"
+
+#authorize_reply_query = "\
+#	SELECT id, username, attribute, value, op \
+#	FROM ${authreply_table} \
+#	WHERE username = BINARY '%{SQL-User-Name}' \
+#	ORDER BY id"
+
+#
+#  The default queries are case insensitive. (for compatibility with
+#  older versions of FreeRADIUS)
+#
+authorize_check_query = "\
+	SELECT id, username, attribute, value, op \
+	FROM ${authcheck_table} \
+	WHERE username = '%{SQL-User-Name}' \
+	ORDER BY id"
+
+authorize_reply_query = "\
+	SELECT id, username, attribute, value, op \
+	FROM ${authreply_table} \
+	WHERE username = '%{SQL-User-Name}' \
+	ORDER BY id"
+
+#
+#  Use these for case sensitive usernames.
+#
+#group_membership_query = "\
+#	SELECT groupname \
+#	FROM ${usergroup_table} \
+#	WHERE username = BINARY '%{SQL-User-Name}' \
+#	ORDER BY priority"
+
+group_membership_query = "\
+	SELECT groupname \
+	FROM ${usergroup_table} \
+	WHERE username = '%{SQL-User-Name}' \
+	ORDER BY priority"
+
+authorize_group_check_query = "\
+	SELECT id, groupname, attribute, \
+	Value, op \
+	FROM ${groupcheck_table} \
+	WHERE groupname = '%{Sql-Group}' \
+	ORDER BY id"
+
+authorize_group_reply_query = "\
+	SELECT id, groupname, attribute, \
+	value, op \
+	FROM ${groupreply_table} \
+	WHERE groupname = '%{Sql-Group}' \
+	ORDER BY id"
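+
+# Illustrative example (not part of the stock file): group membership comes
+# from ${usergroup_table} (radusergroup in schema.sql), lowest priority first,
+# and each group name returned is made available to the two queries above as
+# %{Sql-Group}.  With the hypothetical rows
+#
+#   INSERT INTO radusergroup (username, groupname, priority)
+#   VALUES ('bob', 'staff', 1), ('bob', 'vpn-users', 2);
+#
+# the group check/reply queries run for 'staff' first and then 'vpn-users'.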
+
+#######################################################################
+# Simultaneous Use Checking Queries
+#######################################################################
+# simul_count_query	- query for the number of current connections
+#			- If this is not defined, no simultaneous use checking
+#			- will be performed by this module instance
+# simul_verify_query	- query to return details of current connections
+#				for verification
+#			- Leave blank or commented out to disable verification step
+#			- Note that the returned field order should not be changed.
+#######################################################################
+
+#
+#  Uncomment simul_count_query to enable simultaneous use checking
+#
+#simul_count_query = "\
+#	SELECT COUNT(*) \
+#	FROM ${acct_table1} \
+#	WHERE username = '%{SQL-User-Name}' \
+#	AND acctstoptime IS NULL"
+
+simul_verify_query = "\
+	SELECT \
+		radacctid, acctsessionid, username, nasipaddress, nasportid, framedipaddress, \
+		callingstationid, framedprotocol \
+	FROM ${acct_table1} \
+	WHERE username = '%{SQL-User-Name}' \
+	AND acctstoptime IS NULL"
+
+#######################################################################
+# Accounting and Post-Auth Queries
+#######################################################################
+# These queries insert/update accounting and authentication records.
+# The query to use is determined by the value of 'reference'.
+# This value is used as a configuration path and should resolve to one
+# or more 'query's. If reference points to multiple queries, and a query
+# fails, the next query is executed.
+#
+# Behaviour is identical to the old 1.x/2.x module, except we can now
+# fail between N queries, and query selection can be based on any
+# combination of attributes, or custom 'Acct-Status-Type' values.
+#######################################################################
+accounting {
+	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
+
+	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
+	# when used with the rlm_sql_null driver.
+#	logfile = ${logdir}/accounting.sql
+
+	column_list = "\
+		acctsessionid,		acctuniqueid,		username, \
+		realm,			nasipaddress,		nasportid, \
+		nasporttype,		acctstarttime,		acctupdatetime, \
+		acctstoptime,		acctsessiontime, 	acctauthentic, \
+		connectinfo_start,	connectinfo_stop, 	acctinputoctets, \
+		acctoutputoctets,	calledstationid, 	callingstationid, \
+		acctterminatecause,	servicetype,		framedprotocol, \
+		framedipaddress"
+
+	type {
+		accounting-on {
+			#
+			#  Bulk terminate all sessions associated with a given NAS
+			#
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					acctstoptime = FROM_UNIXTIME(\
+						%{integer:Event-Timestamp}), \
+					acctsessiontime	= '%{integer:Event-Timestamp}' \
+						- UNIX_TIMESTAMP(acctstarttime), \
+					acctterminatecause = '%{%{Acct-Terminate-Cause}:-NAS-Reboot}' \
+				WHERE acctstoptime IS NULL \
+				AND nasipaddress   = '%{NAS-IP-Address}' \
+				AND acctstarttime <= FROM_UNIXTIME(\
+					%{integer:Event-Timestamp})"
+		}
+
+		accounting-off {
+			query = "${..accounting-on.query}"
+		}
+
+		start {
+			#
+			#  Insert a new record into the sessions table
+			#
+			query = "\
+				INSERT INTO ${....acct_table1} \
+					(${...column_list}) \
+				VALUES \
+					('%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					'%{Realm}', \
+					'%{NAS-IP-Address}', \
+					'%{NAS-Port}', \
+					'%{NAS-Port-Type}', \
+					FROM_UNIXTIME(%{integer:Event-Timestamp}), \
+					FROM_UNIXTIME(%{integer:Event-Timestamp}), \
+					NULL, \
+					'0', \
+					'%{Acct-Authentic}', \
+					'%{Connect-Info}', \
+					'', \
+					'0', \
+					'0', \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					'%{Framed-IP-Address}')"
+
+			#
+			#  Key constraints prevented us from inserting a new session,
+			#  use the alternate query to update an existing session.
+			#
+			query = "\
+				UPDATE ${....acct_table1} SET \
+					acctstarttime	= FROM_UNIXTIME(%{integer:Event-Timestamp}), \
+					acctupdatetime	= FROM_UNIXTIME(%{integer:Event-Timestamp}), \
+					connectinfo_start = '%{Connect-Info}' \
+				WHERE acctsessionid = '%{Acct-Session-Id}' \
+				AND username		= '%{SQL-User-Name}' \
+				AND nasipaddress	= '%{NAS-IP-Address}'"
+		}
+
+		interim-update {
+			#
+			#  Update an existing session and calculate the interval
+			#  between the last data we received for the session and this
+			#  update. This can be used to find stale sessions.
+			#
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					acctupdatetime  = (@acctupdatetime_old:=acctupdatetime), \
+					acctupdatetime  = FROM_UNIXTIME(\
+						%{integer:Event-Timestamp}), \
+					acctinterval    = %{integer:Event-Timestamp} - \
+						UNIX_TIMESTAMP(@acctupdatetime_old), \
+					framedipaddress = '%{Framed-IP-Address}', \
+					acctsessiontime = '%{Acct-Session-Time}', \
+					acctinputoctets = '%{%{Acct-Input-Gigawords}:-0}' \
+						<< 32 | '%{%{Acct-Input-Octets}:-0}', \
+					acctoutputoctets = '%{%{Acct-Output-Gigawords}:-0}' \
+						<< 32 | '%{%{Acct-Output-Octets}:-0}' \
+				WHERE acctsessionid     = '%{Acct-Session-Id}' \
+				AND username            = '%{SQL-User-Name}' \
+				AND nasipaddress        = '%{NAS-IP-Address}'"
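+
+			#
+			#  Illustrative note (not part of the stock file): the
+			#  Gigawords attributes carry the upper 32 bits of each
+			#  64-bit octet counter, so "gigawords << 32 | octets"
+			#  reassembles the full count.  For example,
+			#  Acct-Input-Gigawords = 2 and Acct-Input-Octets = 150
+			#  give 2 * 4294967296 + 150 = 8589934742 input octets.
+			#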
+
+			#
+			#  The update condition matched no existing sessions. Use
+			#  the values provided in the update to create a new session.
+			#
+			query = "\
+				INSERT INTO ${....acct_table1} \
+					(${...column_list}) \
+				VALUES \
+					('%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					'%{Realm}', \
+					'%{NAS-IP-Address}', \
+					'%{NAS-Port}', \
+					'%{NAS-Port-Type}', \
+					FROM_UNIXTIME(%{integer:Event-Timestamp} - \
+						%{%{Acct-Session-Time}:-0}), \
+					FROM_UNIXTIME(%{integer:Event-Timestamp}), \
+					NULL, \
+					'%{Acct-Session-Time}', \
+					'%{Acct-Authentic}', \
+					'%{Connect-Info}', \
+					'', \
+					'%{%{Acct-Input-Gigawords}:-0}' << 32 | \
+						'%{%{Acct-Input-Octets}:-0}', \
+					'%{%{Acct-Output-Gigawords}:-0}' << 32 | \
+						'%{%{Acct-Output-Octets}:-0}', \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					'%{Framed-IP-Address}')"
+		}
+
+		stop {
+			#
+			#  Session has terminated, update the stop time and statistics.
+			#
+			query = "\
+				UPDATE ${....acct_table2} SET \
+					acctstoptime	= FROM_UNIXTIME(\
+						%{integer:Event-Timestamp}), \
+					acctsessiontime	= '%{Acct-Session-Time}', \
+					acctinputoctets	= '%{%{Acct-Input-Gigawords}:-0}' \
+						<< 32 | '%{%{Acct-Input-Octets}:-0}', \
+					acctoutputoctets = '%{%{Acct-Output-Gigawords}:-0}' \
+						<< 32 | '%{%{Acct-Output-Octets}:-0}', \
+					acctterminatecause = '%{Acct-Terminate-Cause}', \
+					connectinfo_stop = '%{Connect-Info}' \
+				WHERE acctsessionid 	= '%{Acct-Session-Id}' \
+				AND username		= '%{SQL-User-Name}' \
+				AND nasipaddress	= '%{NAS-IP-Address}'"
+
+			#
+			#  The update condition matched no existing sessions. Use
+			#  the values provided in the update to create a new session.
+			#
+			query = "\
+				INSERT INTO ${....acct_table2} \
+					(${...column_list}) \
+				VALUES \
+					('%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					'%{Realm}', \
+					'%{NAS-IP-Address}', \
+					'%{NAS-Port}', \
+					'%{NAS-Port-Type}', \
+					FROM_UNIXTIME(%{integer:Event-Timestamp} - \
+						%{%{Acct-Session-Time}:-0}), \
+					FROM_UNIXTIME(%{integer:Event-Timestamp}), \
+					FROM_UNIXTIME(%{integer:Event-Timestamp}), \
+					'%{Acct-Session-Time}', \
+					'%{Acct-Authentic}', '', \
+					'%{Connect-Info}', \
+					'%{%{Acct-Input-Gigawords}:-0}' << 32 | \
+						'%{%{Acct-Input-Octets}:-0}', \
+					'%{%{Acct-Output-Gigawords}:-0}' << 32 | \
+						'%{%{Acct-Output-Octets}:-0}', \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'%{Acct-Terminate-Cause}', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					'%{Framed-IP-Address}')"
+		}
+	}
+}
+
+#######################################################################
+# Authentication Logging Queries
+#######################################################################
+# postauth_query	- Insert some info after authentication
+#######################################################################
+
+post-auth {
+	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
+	# when used with the rlm_sql_null driver.
+#	logfile = ${logdir}/post-auth.sql
+
+	query =	"\
+		INSERT INTO ${..postauth_table} \
+			(username, pass, reply, authdate) \
+		VALUES ( \
+			'%{SQL-User-Name}', \
+			'%{%{User-Password}:-%{Chap-Password}}', \
+			'%{reply:Packet-Type}', \
+			'%S')"
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/schema.sql
new file mode 100644
index 0000000..7a26230
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/schema.sql
@@ -0,0 +1,150 @@
+###########################################################################
+# $Id: c5185bee856646733a6bd9b341109cde0688b8f1 $                 #
+#                                                                         #
+#  schema.sql                       rlm_sql - FreeRADIUS SQL Module       #
+#                                                                         #
+#     Database schema for MySQL rlm_sql module                            #
+#                                                                         #
+#     To load:                                                            #
+#         mysql -uroot -prootpass radius < schema.sql                     #
+#                                                                         #
+#                                   Mike Machado <mike@innercite.com>     #
+###########################################################################
+#
+# Table structure for table 'radacct'
+#
+
+CREATE TABLE radacct (
+  radacctid bigint(21) NOT NULL auto_increment,
+  acctsessionid varchar(64) NOT NULL default '',
+  acctuniqueid varchar(32) NOT NULL default '',
+  username varchar(64) NOT NULL default '',
+  groupname varchar(64) NOT NULL default '',
+  realm varchar(64) default '',
+  nasipaddress varchar(15) NOT NULL default '',
+  nasportid varchar(15) default NULL,
+  nasporttype varchar(32) default NULL,
+  acctstarttime datetime NULL default NULL,
+  acctupdatetime datetime NULL default NULL,
+  acctstoptime datetime NULL default NULL,
+  acctinterval int(12) default NULL,
+  acctsessiontime int(12) unsigned default NULL,
+  acctauthentic varchar(32) default NULL,
+  connectinfo_start varchar(50) default NULL,
+  connectinfo_stop varchar(50) default NULL,
+  acctinputoctets bigint(20) default NULL,
+  acctoutputoctets bigint(20) default NULL,
+  calledstationid varchar(50) NOT NULL default '',
+  callingstationid varchar(50) NOT NULL default '',
+  acctterminatecause varchar(32) NOT NULL default '',
+  servicetype varchar(32) default NULL,
+  framedprotocol varchar(32) default NULL,
+  framedipaddress varchar(15) NOT NULL default '',
+  PRIMARY KEY (radacctid),
+  UNIQUE KEY acctuniqueid (acctuniqueid),
+  KEY username (username),
+  KEY framedipaddress (framedipaddress),
+  KEY acctsessionid (acctsessionid),
+  KEY acctsessiontime (acctsessiontime),
+  KEY acctstarttime (acctstarttime),
+  KEY acctinterval (acctinterval),
+  KEY acctstoptime (acctstoptime),
+  KEY nasipaddress (nasipaddress)
+) ENGINE = INNODB;
+
+#
+# Table structure for table 'radcheck'
+#
+
+CREATE TABLE radcheck (
+  id int(11) unsigned NOT NULL auto_increment,
+  username varchar(64) NOT NULL default '',
+  attribute varchar(64)  NOT NULL default '',
+  op char(2) NOT NULL DEFAULT '==',
+  value varchar(253) NOT NULL default '',
+  PRIMARY KEY  (id),
+  KEY username (username(32))
+);
+
+#
+# Table structure for table 'radgroupcheck'
+#
+
+CREATE TABLE radgroupcheck (
+  id int(11) unsigned NOT NULL auto_increment,
+  groupname varchar(64) NOT NULL default '',
+  attribute varchar(64)  NOT NULL default '',
+  op char(2) NOT NULL DEFAULT '==',
+  value varchar(253)  NOT NULL default '',
+  PRIMARY KEY  (id),
+  KEY groupname (groupname(32))
+);
+
+#
+# Table structure for table 'radgroupreply'
+#
+
+CREATE TABLE radgroupreply (
+  id int(11) unsigned NOT NULL auto_increment,
+  groupname varchar(64) NOT NULL default '',
+  attribute varchar(64)  NOT NULL default '',
+  op char(2) NOT NULL DEFAULT '=',
+  value varchar(253)  NOT NULL default '',
+  PRIMARY KEY  (id),
+  KEY groupname (groupname(32))
+);
+
+#
+# Table structure for table 'radreply'
+#
+
+CREATE TABLE radreply (
+  id int(11) unsigned NOT NULL auto_increment,
+  username varchar(64) NOT NULL default '',
+  attribute varchar(64) NOT NULL default '',
+  op char(2) NOT NULL DEFAULT '=',
+  value varchar(253) NOT NULL default '',
+  PRIMARY KEY  (id),
+  KEY username (username(32))
+);
+
+
+#
+# Table structure for table 'radusergroup'
+#
+
+CREATE TABLE radusergroup (
+  username varchar(64) NOT NULL default '',
+  groupname varchar(64) NOT NULL default '',
+  priority int(11) NOT NULL default '1',
+  KEY username (username(32))
+);
+
+#
+# Table structure for table 'radpostauth'
+#
+CREATE TABLE radpostauth (
+  id int(11) NOT NULL auto_increment,
+  username varchar(64) NOT NULL default '',
+  pass varchar(64) NOT NULL default '',
+  reply varchar(32) NOT NULL default '',
+  authdate timestamp NOT NULL,
+  PRIMARY KEY  (id)
+) ENGINE = INNODB;
+
+#
+# Table structure for table 'nas'
+#
+CREATE TABLE nas (
+  id int(10) NOT NULL auto_increment,
+  nasname varchar(128) NOT NULL,
+  shortname varchar(32),
+  type varchar(30) DEFAULT 'other',
+  ports int(5),
+  secret varchar(60) DEFAULT 'secret' NOT NULL,
+  server varchar(64),
+  community varchar(50),
+  description varchar(200) DEFAULT 'RADIUS Client',
+  PRIMARY KEY (id),
+  KEY nasname (nasname)
+);
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/setup.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/setup.sql
new file mode 100644
index 0000000..d04a711
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/mysql/setup.sql
@@ -0,0 +1,24 @@
+# -*- text -*-
+##
+## admin.sql -- MySQL commands for creating the RADIUS user.
+##
+##	WARNING: You should change 'localhost' and 'radpass'
+##		 to something else.  Also update raddb/sql.conf
+##		 with the new RADIUS password.
+##
+##	$Id: aff0505a473c67b65cfc19fae079454a36d4e119 $
+
+#
+#  Create default administrator for RADIUS
+#
+CREATE USER 'radius'@'localhost';
+SET PASSWORD FOR 'radius'@'localhost' = PASSWORD('radpass');
+
+# The server can read any table in SQL
+GRANT SELECT ON radius.* TO 'radius'@'localhost';
+
+# The server can write to the accounting and post-auth logging table.
+#
+#  i.e.
+GRANT ALL on radius.radacct TO 'radius'@'localhost';
+GRANT ALL on radius.radpostauth TO 'radius'@'localhost';
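+
+# Illustrative example (not part of the stock file): after changing the user,
+# host or password above, point the sql module at the same credentials.  The
+# item names are as in the stock mods-available/sql; the values below are the
+# defaults used in this script:
+#
+#   sql {
+#       server = "localhost"
+#       login = "radius"
+#       password = "radpass"
+#       radius_db = "radius"
+#   }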
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/README b/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/README
new file mode 100644
index 0000000..71f5aa3
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/README
@@ -0,0 +1,5 @@
+  The SQL schema and 'create admin user' scripts are here in order to
+simplify the process of using MySQL cluster.
+
+  The queries are NOT located here, because the database driver for
+MySQL cluster is just "mysql", and not "ndb".
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/schema.sql
new file mode 100644
index 0000000..40ee953
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/schema.sql
@@ -0,0 +1,135 @@
+###########################################################################
+# $Id: a7f4c3121ded2b6557294de8bcab832c5715d038 $                 #
+#                                                                         #
+#  schema.sql                       rlm_sql - FreeRADIUS SQL Module       #
+#                                                                         #
+#     Database schema for MySQL Cluster.				  #
+#     The only difference between this file and ../mysql/schema.sql       #
+#     is the definition of the storage engine.                            #
+#                                                                         #
+#     To load:                                                            #
+#         mysql -uroot -prootpass radius < schema.sql                     #
+#                                                                         #
+#                                   Mike Machado <mike@innercite.com>     #
+###########################################################################
+#
+# Table structure for table 'radacct'
+#
+
+CREATE TABLE radacct (
+  radacctid bigint(21) NOT NULL auto_increment,
+  acctsessionid varchar(64) NOT NULL default '',
+  acctuniqueid varchar(32) NOT NULL default '',
+  username varchar(64) NOT NULL default '',
+  groupname varchar(64) NOT NULL default '',
+  realm varchar(64) default '',
+  nasipaddress varchar(15) NOT NULL default '',
+  nasportid varchar(15) default NULL,
+  nasporttype varchar(32) default NULL,
+  acctstarttime datetime NULL default NULL,
+  acctupdatetime datetime NULL default NULL,
+  acctstoptime datetime NULL default NULL,
+  acctinterval int(12) default NULL,
+  acctsessiontime int(12) default NULL,
+  acctauthentic varchar(32) default NULL,
+  connectinfo_start varchar(50) default NULL,
+  connectinfo_stop varchar(50) default NULL,
+  acctinputoctets bigint(20) default NULL,
+  acctoutputoctets bigint(20) default NULL,
+  calledstationid varchar(50) NOT NULL default '',
+  callingstationid varchar(50) NOT NULL default '',
+  acctterminatecause varchar(32) NOT NULL default '',
+  servicetype varchar(32) default NULL,
+  framedprotocol varchar(32) default NULL,
+  framedipaddress varchar(15) NOT NULL default '',
+  PRIMARY KEY  (radacctid),
+  UNIQUE KEY acctuniqueid (acctuniqueid),
+  KEY username (username),
+  KEY framedipaddress (framedipaddress),
+  KEY acctsessionid (acctsessionid),
+  KEY acctsessiontime (acctsessiontime),
+  KEY acctstarttime (acctstarttime),
+  KEY acctstoptime (acctstoptime),
+  KEY nasipaddress (nasipaddress)
+) ENGINE=ndbcluster;
+
+#
+# Table structure for table 'radcheck'
+#
+
+CREATE TABLE radcheck (
+  id int(11) unsigned NOT NULL auto_increment,
+  username varchar(64) NOT NULL default '',
+  attribute varchar(64)  NOT NULL default '',
+  op char(2) NOT NULL DEFAULT '==',
+  value varchar(253) NOT NULL default '',
+  PRIMARY KEY  (id),
+  KEY username (username(32))
+) ENGINE=ndbcluster;
+
+#
+# Table structure for table 'radgroupcheck'
+#
+
+CREATE TABLE radgroupcheck (
+  id int(11) unsigned NOT NULL auto_increment,
+  groupname varchar(64) NOT NULL default '',
+  attribute varchar(64)  NOT NULL default '',
+  op char(2) NOT NULL DEFAULT '==',
+  value varchar(253)  NOT NULL default '',
+  PRIMARY KEY  (id),
+  KEY groupname (groupname(32))
+) ENGINE=ndbcluster;
+
+#
+# Table structure for table 'radgroupreply'
+#
+
+CREATE TABLE radgroupreply (
+  id int(11) unsigned NOT NULL auto_increment,
+  groupname varchar(64) NOT NULL default '',
+  attribute varchar(64)  NOT NULL default '',
+  op char(2) NOT NULL DEFAULT '=',
+  value varchar(253)  NOT NULL default '',
+  PRIMARY KEY  (id),
+  KEY groupname (groupname(32))
+) ENGINE=ndbcluster;
+
+#
+# Table structure for table 'radreply'
+#
+
+CREATE TABLE radreply (
+  id int(11) unsigned NOT NULL auto_increment,
+  username varchar(64) NOT NULL default '',
+  attribute varchar(64) NOT NULL default '',
+  op char(2) NOT NULL DEFAULT '=',
+  value varchar(253) NOT NULL default '',
+  PRIMARY KEY  (id),
+  KEY username (username(32))
+) ENGINE=ndbcluster;
+
+
+#
+# Table structure for table 'radusergroup'
+#
+
+CREATE TABLE radusergroup (
+  username varchar(64) NOT NULL default '',
+  groupname varchar(64) NOT NULL default '',
+  priority int(11) NOT NULL default '1',
+  KEY username (username(32))
+) ENGINE=ndbcluster;
+
+#
+# Table structure for table 'radpostauth'
+#
+
+CREATE TABLE radpostauth (
+  id int(11) NOT NULL auto_increment,
+  username varchar(64) NOT NULL default '',
+  pass varchar(64) NOT NULL default '',
+  reply varchar(32) NOT NULL default '',
+  authdate timestamp NOT NULL,
+  PRIMARY KEY  (id)
+) ENGINE=ndbcluster;
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/setup.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/setup.sql
new file mode 100644
index 0000000..fda4cb9
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/ndb/setup.sql
@@ -0,0 +1,25 @@
+# -*- text -*-
+##
+## admin.sql -- MySQL commands for creating the RADIUS user.
+##
+##	WARNING: You should change 'localhost' and 'radpass'
+##		 to something else.  Also update raddb/sql.conf
+##		 with the new RADIUS password.
+##
+##	$Id: 5c91384c0991ea9614b7c798a1ab4c89ca227115 $
+
+#
+#  Create default administrator for RADIUS
+#
+CREATE USER 'radius'@'localhost';
+SET PASSWORD FOR 'radius'@'localhost' = PASSWORD('radpass');
+
+# The server can read and write any table in SQL
+GRANT ALL ON radius.* TO 'radius'@'localhost' identified by 'radpass';
+GRANT ALL ON radius.* TO 'radius'@'radsrvr' identified by 'radpass';
+
+# The server can write to the accounting and post-auth logging table.
+#
+#  i.e.
+#GRANT ALL on radius.radacct TO 'radius'@'localhost' identified by 'radpass';
+#GRANT ALL on radius.radacct TO 'radius'@'radsrvr' identified by 'radpass';
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/oracle/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/oracle/queries.conf
new file mode 100644
index 0000000..c062b66
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/oracle/queries.conf
@@ -0,0 +1,382 @@
+# -*- text -*-
+#
+#  main/oracle/queries.conf -- Oracle configuration for default schema (schema.sql)
+#
+#  $Id: ca22f5f5c9bf5dff47e60fb2bed56d6b161a4d08 $
+
+#######################################################################
+#  Query config:  Username
+#######################################################################
+# This is the username that will get substituted, escaped, and added
+# as attribute 'SQL-User-Name'.  '%{SQL-User-Name}' should be used below
+# everywhere a username substitution is needed so you can be sure
+# the username passed from the client is escaped properly.
+#
+#  Uncomment the next line, if you want the sql_user_name to mean:
+#
+#    Use Stripped-User-Name, if it's there.
+#    Else use User-Name, if it's there,
+#    Else use hard-coded string "DEFAULT" as the user name.
+#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-DEFAULT}}"
+#
+sql_user_name = "%{User-Name}"
+
+#######################################################################
+#  Default profile
+#######################################################################
+# This is the default profile. It is found in SQL by group membership.
+# That means that this profile must be a member of at least one group
+# which will contain the corresponding check and reply items.
+# This profile will be queried in the authorize section for every user.
+# The point is to assign all users a default profile without having to
+# manually add each one to a group that will contain the profile.
+# The SQL module will also honor the User-Profile attribute. This
+# attribute can be set anywhere in the authorize section (ie the users
+# file). It is found exactly as the default profile is found.
+# If it is set then it will *overwrite* the default profile setting.
+# The idea is to select profiles based on checks on the incoming packets,
+# not on user group membership. For example:
+# -- users file --
+# DEFAULT	Service-Type == Outbound-User, User-Profile := "outbound"
+# DEFAULT	Service-Type == Framed-User, User-Profile := "framed"
+#
+# By default the default_user_profile is not set
+#
+#default_user_profile = "DEFAULT"
+#
+# Determines if we will query the default_user_profile or the User-Profile
+# if the user is not found. If the profile is found then we consider the user
+# found. By default this is set to 'no'.
+#
+#query_on_not_found = no
+
+
+#######################################################################
+#  NAS Query
+#######################################################################
+#  This query retrieves the radius clients
+#
+#  0. Row ID (currently unused)
+#  1. Name (or IP address)
+#  2. Shortname
+#  3. Type
+#  4. Secret
+#  5. Virtual server
+#######################################################################
+
+client_query = "\
+	SELECT id, nasname, shortname, type, secret, server \
+	FROM ${client_table}"
+
+#######################################################################
+#  Authorization Queries
+#######################################################################
+#  These queries compare the check items for the user
+#  in ${authcheck_table} and setup the reply items in
+#  ${authreply_table}.  You can use any query/tables
+#  you want, but the return data for each row MUST
+#  be in the  following order:
+#
+#  0. Row ID (currently unused)
+#  1. UserName/GroupName
+#  2. Item Attr Name
+#  3. Item Attr Value
+#  4. Item Attr Operation
+#######################################################################
+#
+# WARNING: Oracle is case sensitive
+#
+# The main difference between MySQL and Oracle queries is the date format.
+# You must use the TO_DATE function to transform the radius date format to
+# the Oracle date format, and put NULL rather than '0' in an empty date field.
+#
+#######################################################################
+
+authorize_check_query = "\
+	SELECT id, UserName, Attribute, Value, op \
+	FROM ${authcheck_table} \
+	WHERE Username = '%{SQL-User-Name}' \
+	ORDER BY id"
+
+authorize_reply_query = "\
+	SELECT id, UserName, Attribute, Value, op \
+	FROM ${authreply_table} \
+	WHERE Username = '%{SQL-User-Name}' \
+	ORDER BY id"
+
+authorize_group_check_query = "\
+	SELECT \
+		${groupcheck_table}.id, ${groupcheck_table}.GroupName, ${groupcheck_table}.Attribute, \
+		${groupcheck_table}.Value,${groupcheck_table}.op \
+	FROM ${groupcheck_table}, ${usergroup_table} \
+	WHERE ${usergroup_table}.Username = '%{SQL-User-Name}' \
+	AND ${usergroup_table}.GroupName = ${groupcheck_table}.GroupName \
+	ORDER BY ${groupcheck_table}.id"
+
+authorize_group_reply_query = "\
+	SELECT \
+		${groupreply_table}.id, ${groupreply_table}.GroupName, ${groupreply_table}.Attribute, \
+		${groupreply_table}.Value, ${groupreply_table}.op \
+	FROM ${groupreply_table}, ${usergroup_table} \
+	WHERE ${usergroup_table}.Username = '%{SQL-User-Name}' \
+	AND ${usergroup_table}.GroupName = ${groupreply_table}.GroupName \
+	ORDER BY ${groupreply_table}.id"
+
+#######################################################################
+# Simultaneous Use Checking Queries
+#######################################################################
+# simul_count_query	- query for the number of current connections
+#			- If this is not defined, no simultaneous use checking
+#			- will be performed by this module instance
+# simul_verify_query	- query to return details of current connections for verification
+#			- Leave blank or commented out to disable verification step
+#			- Note that the returned field order should not be changed.
+#######################################################################
+
+#
+#  Uncomment simul_count_query to enable simultaneous use checking
+#
+#simul_count_query = "\
+#	SELECT COUNT(*) \
+#	FROM ${acct_table1} \
+#	WHERE UserName = '%{SQL-User-Name}' \
+#	AND AcctStopTime IS NULL"
+
+simul_verify_query = "\
+	SELECT \
+		RadAcctId, AcctSessionId, UserName, NASIPAddress, NASPortId, \
+		FramedIPAddress, CallingStationId, FramedProtocol \
+	FROM ${acct_table1} \
+	WHERE UserName='%{SQL-User-Name}' \
+	AND AcctStopTime IS NULL"
+
+#######################################################################
+# Group Membership Queries
+#######################################################################
+# group_membership_query	- Check user group membership
+#######################################################################
+
+group_membership_query = "\
+	SELECT GroupName \
+	FROM ${usergroup_table} \
+	WHERE UserName='%{SQL-User-Name}'"
+
+#######################################################################
+# Accounting and Post-Auth Queries
+#######################################################################
+# These queries insert/update accounting and authentication records.
+# The query to use is determined by the value of 'reference'.
+# This value is used as a configuration path and should resolve to one
+# or more 'query's. If reference points to multiple queries, and a query
+# fails, the next query is executed.
+#
+# Behaviour is identical to the old 1.x/2.x module, except we can now
+# fail between N queries, and query selection can be based on any
+# combination of attributes, or custom 'Acct-Status-Type' values.
+#######################################################################
+accounting {
+	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
+
+	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
+	# when used with the rlm_sql_null driver.
+#		logfile = ${logdir}/accounting.sql
+
+	type {
+		accounting-on {
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					AcctStopTime = TO_DATE('%S','yyyy-mm-dd hh24:mi:ss'), \
+					AcctSessionTime = round((TO_DATE('%S','yyyy-mm-dd hh24:mi:ss') - \
+						TO_DATE(TO_CHAR(acctstarttime, 'yyyy-mm-dd hh24:mi:ss'),'yyyy-mm-dd hh24:mi:ss'))*86400), \
+					AcctTerminateCause='%{%{Acct-Terminate-Cause}:-NAS-Reboot}', \
+					AcctStopDelay = %{%{Acct-Delay-Time}:-0} \
+				WHERE AcctStopTime IS NULL \
+				AND NASIPAddress = '%{NAS-IP-Address}' \
+				AND AcctStartTime <= TO_DATE('%S','yyyy-mm-dd hh24:mi:ss')"
+		}
+
+		accounting-off {
+			query = "${..accounting-on.query}"
+		}
+
+		start {
+			query = "\
+				INSERT INTO ${....acct_table1} \
+					(RadAcctId,		AcctSessionId,		AcctUniqueId, \
+					UserName,		Realm,			NASIPAddress, \
+					NASPortId,		NASPortType,		AcctStartTime, \
+					AcctStopTime,		AcctSessionTime,	AcctAuthentic, \
+					ConnectInfo_start,	ConnectInfo_stop,	AcctInputOctets, \
+					AcctOutputOctets,	CalledStationId,	CallingStationId, \
+					AcctTerminateCause,	ServiceType,		FramedProtocol, \
+					FramedIPAddress,	AcctStartDelay,		AcctStopDelay, \
+					XAscendSessionSvrKey) \
+				VALUES(\
+					'', \
+					'%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					'%{Realm}', \
+					'%{NAS-IP-Address}', \
+					'%{NAS-Port-Id}', \
+					'%{NAS-Port-Type}', \
+					TO_DATE('%S','yyyy-mm-dd hh24:mi:ss'), \
+					NULL, \
+					'0', \
+					'%{Acct-Authentic}', \
+					'%{Connect-Info}', \
+					'', \
+					'0', \
+					'0', \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					'%{Framed-IP-Address}', \
+					'%{Acct-Delay-Time}', \
+					'0', \
+					'%{X-Ascend-Session-Svr-Key}')"
+
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					AcctStartTime = TO_DATE('%S','yyyy-mm-dd hh24:mi:ss'), \
+					AcctStartDelay = '%{%{Acct-Delay-Time}:-0}', \
+					ConnectInfo_start = '%{Connect-Info}' \
+				WHERE AcctSessionId = '%{Acct-Session-Id}' \
+				AND UserName = '%{SQL-User-Name}' \
+				AND NASIPAddress = '%{NAS-IP-Address}' \
+				AND AcctStopTime IS NULL"
+		}
+
+		interim-update {
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					FramedIPAddress = NULLIF('%{Framed-IP-Address}', ''), \
+					AcctSessionTime = '%{Acct-Session-Time}', \
+					AcctInputOctets = '%{Acct-Input-Octets}' + \
+						('%{%{Acct-Input-Gigawords}:-0}' * 4294967296), \
+					AcctOutputOctets = '%{Acct-Output-Octets}' +  \
+						('%{%{Acct-Output-Gigawords}:-0}' * 4294967296) \
+				WHERE AcctSessionId = '%{Acct-Session-Id}' \
+				AND UserName = '%{SQL-User-Name}' \
+				AND NASIPAddress= '%{NAS-IP-Address}' \
+				AND AcctStopTime IS NULL"
+
+			query = "\
+				INSERT into ${....acct_table1} \
+					(RadAcctId,		AcctSessionId,		AcctUniqueId, \
+					UserName,		Realm,			NASIPAddress, \
+					NASPortId,		NASPortType,		AcctStartTime, \
+					AcctSessionTime, 	AcctAuthentic,		ConnectInfo_start, \
+					AcctInputOctets,	AcctOutputOctets,	CalledStationId, \
+					CallingStationId,	ServiceType,		FramedProtocol, \
+					FramedIPAddress,	AcctStartDelay,		XAscendSessionSvrKey) \
+				VALUES(\
+					'', \
+					'%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					'%{Realm}', \
+					'%{NAS-IP-Address}', \
+					'%{NAS-Port-Id}', \
+					'%{NAS-Port-Type}', \
+					NULL, \
+					'%{Acct-Session-Time}', \
+					'%{Acct-Authentic}', \
+					'', \
+					'%{Acct-Input-Octets}' + \
+						('%{%{Acct-Input-Gigawords}:-0}' * 4294967296), \
+					'%{Acct-Output-Octets}' +  \
+						('%{%{Acct-Output-Gigawords}:-0}' * 4294967296), \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					'%{Framed-IP-Address}', \
+					'0', \
+					'%{X-Ascend-Session-Svr-Key}')"
+		}
+
+		stop {
+			query = "\
+				UPDATE ${....acct_table2} \
+				SET \
+					AcctStopTime = TO_DATE('%S','yyyy-mm-dd hh24:mi:ss'), \
+					AcctSessionTime = '%{Acct-Session-Time}', \
+					AcctInputOctets = '%{Acct-Input-Octets}' + \
+						('%{%{Acct-Input-Gigawords}:-0}' * 4294967296), \
+					AcctOutputOctets = '%{Acct-Output-Octets}' +  \
+						('%{%{Acct-Output-Gigawords}:-0}' * 4294967296), \
+					AcctTerminateCause = '%{Acct-Terminate-Cause}', \
+					AcctStopDelay = '%{%{Acct-Delay-Time}:-0}', \
+					ConnectInfo_stop = '%{Connect-Info}' \
+				WHERE AcctSessionId = '%{Acct-Session-Id}' \
+				AND UserName = '%{SQL-User-Name}' \
+				AND NASIPAddress = '%{NAS-IP-Address}' \
+				AND AcctStopTime IS NULL"
+
+			query = "\
+				INSERT into ${....acct_table2} \
+					(RadAcctId,		AcctSessionId,		AcctUniqueId, \
+					 UserName,		Realm,			NASIPAddress, \
+					 NASPortId,		NASPortType,		AcctStartTime, \
+					 AcctStopTime,		AcctSessionTime,	AcctAuthentic, \
+					 ConnectInfo_start,	ConnectInfo_stop,	AcctInputOctets, \
+					 AcctOutputOctets,	CalledStationId,	CallingStationId, \
+					 AcctTerminateCause,	ServiceType,		FramedProtocol, \
+					 FramedIPAddress,	AcctStartDelay,		AcctStopDelay) \
+				VALUES(\
+					'', \
+					'%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					'%{Realm}', \
+					'%{NAS-IP-Address}', \
+					'%{NAS-Port-Id}', \
+					'%{NAS-Port-Type}', \
+					NULL, \
+					TO_DATE('%S','yyyy-mm-dd hh24:mi:ss'), \
+					'%{Acct-Session-Time}', \
+					'%{Acct-Authentic}', \
+					'', \
+					'%{Connect-Info}', \
+					'%{Acct-Input-Octets}' + \
+						('%{%{Acct-Input-Gigawords}:-0}' * 4294967296), \
+					'%{Acct-Output-Octets}' + \
+						('%{%{Acct-Output-Gigawords}:-0}' * 4294967296), \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'%{Acct-Terminate-Cause}', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					'%{Framed-IP-Address}', \
+					'0', \
+					'%{%{Acct-Delay-Time}:-0}')"
+
+		}
+	}
+}
+
+#######################################################################
+# Authentication Logging Queries
+#######################################################################
+# postauth_query                - Insert some info after authentication
+#######################################################################
+
+post-auth {
+	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
+	# when used with the rlm_sql_null driver.
+#	logfile = ${logdir}/post-auth.sql
+	query = "\
+		INSERT INTO ${..postauth_table} \
+			(username, pass, reply, authdate) \
+		VALUES (\
+			'%{User-Name}', \
+			'%{%{User-Password}:-%{Chap-Password}}', \
+			'%{reply:Packet-Type}', \
+			TO_TIMESTAMP('%S','YYYY-MM-DD HH24:MI:SS'))"
+}
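
Once the post-auth query above has populated radpostauth, a simple report of recent rejects could look like this (illustrative only; 'Access-Reject' is the standard RADIUS packet type stored in the reply column):

```
-- Recent authentication failures, newest first.
SELECT username, reply, authdate
  FROM radpostauth
 WHERE reply = 'Access-Reject'
 ORDER BY authdate DESC;
```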
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/oracle/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/oracle/schema.sql
new file mode 100644
index 0000000..1dcaf7a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/oracle/schema.sql
@@ -0,0 +1,230 @@
+/*
+ * $Id: c11295fa7307a7c05a586f5354dd59de32c059de $
+ *
+ * Oracle schema for FreeRADIUS
+ *
+ *
+ * NOTE: Which columns are NULLable??
+ */
+
+/*
+ * Table structure for table 'radacct'
+ */
+CREATE TABLE radacct (
+	radacctid		INT PRIMARY KEY,
+	acctsessionid		VARCHAR(96) NOT NULL,
+	acctuniqueid		VARCHAR(32),
+	username		VARCHAR(64) NOT NULL,
+	groupname		VARCHAR(32),
+	realm			VARCHAR(30),
+	nasipaddress		VARCHAR(15) NOT NULL,
+	nasportid		VARCHAR(32),
+	nasporttype		VARCHAR(32),
+	acctstarttime		TIMESTAMP WITH TIME ZONE,
+	acctstoptime		TIMESTAMP WITH TIME ZONE,
+	acctsessiontime		NUMERIC(19),
+	acctauthentic		VARCHAR(32),
+	connectinfo_start	VARCHAR(50),
+	connectinfo_stop	VARCHAR(50),
+	acctinputoctets		NUMERIC(19),
+	acctoutputoctets	NUMERIC(19),
+	calledstationid		VARCHAR(50),
+	callingstationid	VARCHAR(50),
+	acctterminatecause	VARCHAR(32),
+	servicetype		VARCHAR(32),
+	framedprotocol		VARCHAR(32),
+	framedipaddress		VARCHAR(15),
+	acctstartdelay		NUMERIC(12),
+	acctstopdelay		NUMERIC(12),
+	XAscendSessionSvrKey	VARCHAR(10)
+);
+
+CREATE UNIQUE INDEX radacct_idx0
+	ON radacct(acctuniqueid);
+CREATE UNIQUE INDEX radacct_idx1
+	ON radacct(acctsessionid,username,acctstarttime,
+		acctstoptime,nasipaddress,framedipaddress);
+
+CREATE SEQUENCE radacct_seq START WITH 1 INCREMENT BY 1;
+
+/* Trigger to emulate a serial # on the primary key */
+CREATE OR REPLACE TRIGGER radacct_serialnumber
+	BEFORE INSERT OR UPDATE OF radacctid ON radacct
+	FOR EACH ROW
+	BEGIN
+		if ( :new.radacctid = 0 or :new.radacctid is null ) then
+			SELECT radacct_seq.nextval into :new.radacctid from dual;
+		end if;
+	END;
+/
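
To see the sequence/trigger pair above in action (purely illustrative; the column values are made up), an insert can leave radacctid empty and let the trigger assign it:

```
-- The BEFORE INSERT trigger replaces the NULL id with radacct_seq.NEXTVAL.
INSERT INTO radacct (radacctid, acctsessionid, acctuniqueid, username, nasipaddress)
VALUES (NULL, 'sess-0001', 'uniq-0001', 'user01', '10.0.0.1');
```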
+
+/*
+ * Table structure for table 'radcheck'
+ */
+CREATE TABLE radcheck (
+	id 		INT PRIMARY KEY,
+	username	VARCHAR(30) NOT NULL,
+	attribute	VARCHAR(64),
+	op		VARCHAR(2) NOT NULL,
+	value		VARCHAR(40)
+);
+CREATE SEQUENCE radcheck_seq START WITH 1 INCREMENT BY 1;
+
+/* Trigger to emulate a serial # on the primary key */
+CREATE OR REPLACE TRIGGER radcheck_serialnumber
+	BEFORE INSERT OR UPDATE OF id ON radcheck
+	FOR EACH ROW
+	BEGIN
+		if ( :new.id = 0 or :new.id is null ) then
+			SELECT radcheck_seq.nextval into :new.id from dual;
+		end if;
+	END;
+/
+
+/*
+ * Table structure for table 'radgroupcheck'
+ */
+CREATE TABLE radgroupcheck (
+	id 		INT PRIMARY KEY,
+	groupname	VARCHAR(20) UNIQUE NOT NULL,
+	attribute	VARCHAR(64),
+	op		CHAR(2) NOT NULL,
+	value		VARCHAR(40)
+);
+CREATE SEQUENCE radgroupcheck_seq START WITH 1 INCREMENT BY 1;
+
+/*
+ * Table structure for table 'radgroupreply'
+ */
+CREATE TABLE radgroupreply (
+	id		INT PRIMARY KEY,
+	GroupName	VARCHAR(20) UNIQUE NOT NULL,
+	Attribute	VARCHAR(64),
+	op		CHAR(2) NOT NULL,
+	Value		VARCHAR(40)
+);
+CREATE SEQUENCE radgroupreply_seq START WITH 1 INCREMENT BY 1;
+
+/*
+ * Table structure for table 'radreply'
+ */
+CREATE TABLE radreply (
+	id		INT PRIMARY KEY,
+	UserName	VARCHAR(30) NOT NULL,
+	Attribute	VARCHAR(64),
+	op		CHAR(2) NOT NULL,
+	Value		VARCHAR(40)
+);
+CREATE INDEX radreply_idx1 ON radreply(UserName);
+CREATE SEQUENCE radreply_seq START WITH 1 INCREMENT BY 1;
+
+/* Trigger to emulate a serial # on the primary key */
+CREATE OR REPLACE TRIGGER radreply_serialnumber
+	BEFORE INSERT OR UPDATE OF id ON radreply
+	FOR EACH ROW
+	BEGIN
+		if ( :new.id = 0 or :new.id is null ) then
+			SELECT radreply_seq.nextval into :new.id from dual;
+		end if;
+	END;
+/
+
+/*
+ * Table structure for table 'radusergroup'
+ */
+CREATE TABLE radusergroup (
+	id		INT PRIMARY KEY,
+	UserName	VARCHAR(30) UNIQUE NOT NULL,
+	GroupName	VARCHAR(30)
+);
+CREATE SEQUENCE radusergroup_seq START WITH 1 INCREMENT BY 1;
+
+/* Trigger to emulate a serial # on the primary key */
+CREATE OR REPLACE TRIGGER radusergroup_serialnumber
+	BEFORE INSERT OR UPDATE OF id ON radusergroup
+	FOR EACH ROW
+	BEGIN
+		if ( :new.id = 0 or :new.id is null ) then
+			SELECT radusergroup_seq.nextval into :new.id from dual;
+		end if;
+	END;
+/
+
+
+/*
+ * Table structure for table 'realmgroup'
+ */
+CREATE TABLE realmgroup (
+	id 		INT PRIMARY KEY,
+	RealmName	VARCHAR(30) UNIQUE NOT NULL,
+	GroupName	VARCHAR(30)
+);
+CREATE SEQUENCE realmgroup_seq START WITH 1 INCREMENT BY 1;
+
+CREATE TABLE realms (
+	id		INT PRIMARY KEY,
+	realmname	VARCHAR(64),
+	nas		VARCHAR(128),
+	authport	INT,
+	options		VARCHAR(128)
+);
+CREATE SEQUENCE realms_seq START WITH 1 INCREMENT BY 1;
+
+CREATE TABLE radhuntgroup (
+	id              INT PRIMARY KEY,
+	GroupName VARCHAR(64) NOT NULL,
+	Nasipaddress VARCHAR(15) UNIQUE NOT NULL,
+	NASPortID VARCHAR(15)
+);
+
+CREATE SEQUENCE radhuntgroup_seq START WITH 1 INCREMENT BY 1;
+
+CREATE OR REPLACE TRIGGER radhuntgroup_serialnumber
+	BEFORE INSERT OR UPDATE OF id ON radhuntgroup
+	FOR EACH ROW
+	BEGIN
+		if ( :new.id = 0 or :new.id is null ) then
+			SELECT radhuntgroup_seq.nextval into :new.id from dual;
+		end if;
+	END;
+/
+
+CREATE TABLE radpostauth (
+	  id            INT PRIMARY KEY,
+	  UserName      VARCHAR(64) NOT NULL,
+	  Pass          VARCHAR(64),
+	  Reply         VARCHAR(64),
+	  AuthDate 	DATE
+);
+
+CREATE SEQUENCE radpostauth_seq START WITH 1 INCREMENT BY 1;
+
+CREATE OR REPLACE TRIGGER radpostauth_TRIG
+	BEFORE INSERT OR UPDATE OF id ON radpostauth
+	FOR EACH ROW
+	BEGIN
+		if ( :new.id = 0 or :new.id is null ) then
+			SELECT radpostauth_seq.nextval into :new.id from dual;
+		end if;
+		if (:new.AuthDate is null) then
+		  select sysdate into :new.AuthDate from dual;
+		end if;
+	END;
+
+/
+
+/*
+ * Table structure for table 'nas'
+ */
+CREATE TABLE nas (
+	id              INT PRIMARY KEY,
+	nasname         VARCHAR(128),
+	shortname       VARCHAR(32),
+	type            VARCHAR(30),
+	ports           INT,
+	secret          VARCHAR(60),
+	server          VARCHAR(64),
+	community       VARCHAR(50),
+	description     VARCHAR(200)
+);
+CREATE SEQUENCE nas_seq START WITH 1 INCREMENT BY 1;
+
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/cisco_h323_db_schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/cisco_h323_db_schema.sql
new file mode 100644
index 0000000..7d14c3c
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/cisco_h323_db_schema.sql
@@ -0,0 +1,295 @@
+/*
+ * $Id: ec9731648e83c1e1d4ee39ed24a994ee79bb7dd6 $
+ *
+ * --- Peter Nixon [ codemonkey@peternixon.net ]
+ *
+ * This is a custom SQL schema for doing H323 and SIP VoIP accounting
+ * with FreeRadius and Cisco equipment. It is currently known to work
+ * with 3640, 5300 and 5350 series as well as CSPS (Cisco SIP Proxy
+ * Server).  It will scale A LOT better than the default radius schema
+ * which is designed for simple dialup installations of FreeRadius.
+ *
+ * For this schema to work properly you MUST use
+ * raddb/sql/postgresql/voip-postpaid.conf rather than
+ * raddb/sql/postgresql/dialup.conf
+ *
+ * If you wish to do RADIUS Authentication using the same database,
+ * you MUST use raddb/sql/postgresql/schema.sql as well as this schema.
+ */
+
+/*
+ * Table structure for 'Start' tables
+ */
+
+CREATE TABLE StartVoIP (
+	RadAcctId		BIGSERIAL PRIMARY KEY,
+	AcctTime		TIMESTAMP with time zone NOT NULL,
+	h323SetupTime		TIMESTAMP with time zone,
+	H323ConnectTime		TIMESTAMP with time zone,
+	UserName		VARCHAR(64),
+	RadiusServerName	VARCHAR(32),
+	NASIPAddress		INET NOT NULL,
+	CalledStationId		VARCHAR(80),
+	CallingStationId	VARCHAR(80),
+	AcctDelayTime		INTEGER,
+	H323GWID		VARCHAR(32),
+	h323CallOrigin		VARCHAR(10),
+	CallID			VARCHAR(80) NOT NULL,
+	processed		BOOLEAN DEFAULT false
+);
+create index startvoipcombo on startvoip (AcctTime, nasipaddress);
+
+
+CREATE TABLE StartTelephony (
+	RadAcctId		BIGSERIAL PRIMARY KEY,
+	AcctTime		TIMESTAMP with time zone NOT NULL,
+	h323SetupTime		TIMESTAMP with time zone,
+	H323ConnectTime		TIMESTAMP with time zone,
+	UserName		VARCHAR(64),
+	RadiusServerName	VARCHAR(32),
+	NASIPAddress		INET NOT NULL,
+	CalledStationId		VARCHAR(80),
+	CallingStationId	VARCHAR(80),
+	AcctDelayTime		INTEGER,
+	H323GWID		VARCHAR(32),
+	h323CallOrigin		VARCHAR(10),
+	CallID			VARCHAR(80) NOT NULL,
+	processed		BOOLEAN DEFAULT false
+);
+create index starttelephonycombo on starttelephony (AcctTime, nasipaddress);
+
+
+
+/*
+ * Table structure for 'Stop' tables
+ */
+CREATE TABLE StopVoIP (
+	RadAcctId		BIGSERIAL PRIMARY KEY,
+	AcctTime		TIMESTAMP with time zone NOT NULL,
+	H323SetupTime		TIMESTAMP with time zone,
+	H323ConnectTime		TIMESTAMP with time zone,
+	H323DisconnectTime	TIMESTAMP with time zone,
+	UserName		VARCHAR(32),
+	RadiusServerName	VARCHAR(32),
+	NASIPAddress		INET NOT NULL,
+	AcctSessionTime		BIGINT,
+	AcctInputOctets		BIGINT,
+	AcctOutputOctets	BIGINT,
+	CalledStationId		VARCHAR(80),
+	CallingStationId	VARCHAR(80),
+	AcctDelayTime		SMALLINT,
+	CiscoNASPort		VARCHAR(1),
+	H323GWID		VARCHAR(32),
+	H323CallOrigin		VARCHAR(10),
+	H323DisconnectCause	VARCHAR(20),
+	H323RemoteAddress	INET,
+	H323VoiceQuality	INTEGER,
+	CallID			VARCHAR(80) NOT NULL,
+	processed		BOOLEAN DEFAULT false
+);
+create UNIQUE index stopvoipcombo on stopvoip (AcctTime, nasipaddress, CallID);
+
+
+CREATE TABLE StopTelephony (
+	RadAcctId		BIGSERIAL PRIMARY KEY,
+	AcctTime		TIMESTAMP with time zone NOT NULL,
+	H323SetupTime		TIMESTAMP with time zone NOT NULL,
+	H323ConnectTime		TIMESTAMP with time zone NOT NULL,
+	H323DisconnectTime	TIMESTAMP with time zone NOT NULL,
+	UserName		VARCHAR(32) DEFAULT '' NOT NULL,
+	RadiusServerName	VARCHAR(32),
+	NASIPAddress		INET NOT NULL,
+	AcctSessionTime		BIGINT,
+	AcctInputOctets		BIGINT,
+	AcctOutputOctets	BIGINT,
+	CalledStationId		VARCHAR(80),
+	CallingStationId	VARCHAR(80),
+	AcctDelayTime		SMALLINT,
+	CiscoNASPort		VARCHAR(16),
+	H323GWID		VARCHAR(32),
+	H323CallOrigin		VARCHAR(10),
+	H323DisconnectCause	VARCHAR(20),
+	H323RemoteAddress	INET,
+	H323VoiceQuality	INTEGER,
+	CallID			VARCHAR(80) NOT NULL,
+	processed		BOOLEAN DEFAULT false
+);
+-- You can have more than one record that is identical except for CiscoNASPort if you have a dial peer huntgroup
+-- configured for multiple PRIs.
+create UNIQUE index stoptelephonycombo on stoptelephony (AcctTime, nasipaddress, CallID, CiscoNASPort);
+
+/*
+ * Table structure for 'gateways'
+ *
+ * This table should list the IP addresses, names and locations of all your gateways
+ * This can be used to make more useful reports.
+ *
+ * Note: This table should be removed in favour of using the "nas" table.
+ */
+
+CREATE TABLE gateways (
+	gw_ip		INET NOT NULL,
+	gw_name		VARCHAR(32) NOT NULL,
+	gw_city		VARCHAR(32)
+);
+
+
+/*
+ * Table structure for 'customers'
+ *
+ * This table should list your Customers names and company
+ * This can be used to make more useful reports.
+ */
+
+CREATE TABLE customers (
+	cust_id		SERIAL NOT NULL,
+	company		VARCHAR(32),
+	customer	VARCHAR(32)
+);
+
+/*
+ * Table structure for 'cust_gw'
+ *
+ * This table should list the IP addresses and Customer IDs of all your Customers gateways
+ * This can be used to make more useful reports.
+ */
+
+CREATE TABLE cust_gw (
+	cust_gw		INET PRIMARY KEY,
+	cust_id		INTEGER NOT NULL,
+	"location"	VARCHAR(32)
+);
+
+
+CREATE VIEW customerip AS
+    SELECT gw.cust_gw AS ipaddr, cust.company, cust.customer, gw."location" FROM customers cust, cust_gw gw WHERE (cust.cust_id = gw.cust_id);
+
+
+-- create plpgsql language (You need to be a database superuser to be able to do this)
+CREATE FUNCTION "plpgsql_call_handler" () RETURNS LANGUAGE_HANDLER AS '$libdir/plpgsql' LANGUAGE C;
+CREATE TRUSTED LANGUAGE "plpgsql" HANDLER "plpgsql_call_handler";
+
+/*
+ * Function 'strip_dot'
+ * removes "." from the start of cisco timestamps
+ *
+ * From the cisco website:
+ * "A timestamp that is preceded by an asterisk (*) or a dot (.) may not be accurate.
+ *  An asterisk (*) means that after a gateway reboot, the gateway clock was not manually set
+ *  and the gateway has not synchronized with an NTP server yet. A dot (.) means the gateway
+ *  NTP has lost synchronization with an NTP server."
+ *
+ * We therefore do not bother to strip asterisks (*) from timestamps, as you NEED ntp setup
+ * unless you don't care about billing at all!
+ *
+ *  * Example usage:
+ *      insert into mytable values (strip_dot('.16:46:02.356 EET Wed Dec 11 2002'));
+ *
+ */
+
+
+CREATE OR REPLACE FUNCTION strip_dot (VARCHAR) RETURNS TIMESTAMPTZ AS '
+ DECLARE
+	original_timestamp ALIAS FOR $1;
+ BEGIN
+	IF original_timestamp = '''' THEN
+		RETURN NULL;
+	END IF;
+	IF substring(original_timestamp from 1 for 1) = ''.'' THEN
+		RETURN substring(original_timestamp from 2);
+	ELSE
+		RETURN original_timestamp;
+	END IF;
+ END;
+' LANGUAGE 'plpgsql';
+
+
+CREATE OR REPLACE FUNCTION pick_id (VARCHAR, VARCHAR) RETURNS VARCHAR AS '
+ DECLARE
+	h323confid ALIAS FOR $1;
+	callid ALIAS FOR $2;
+ BEGIN
+	IF h323confid <> '''' THEN
+		RETURN h323confid;
+	END IF;
+	IF callid <> '''' THEN
+		RETURN callid;
+	END IF;
+	RETURN NULL;
+ END;
+' LANGUAGE 'plpgsql';
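
The two helper functions defined above can be exercised directly, for example (the timestamp is the one from the strip_dot comment; output depends on the session time zone, so treat this as a sketch):

```
-- strip_dot() drops the leading '.' from a Cisco timestamp; pick_id() prefers the
-- H323 conference id over the SIP call id when both are supplied.
SELECT strip_dot('.16:46:02.356 EET Wed Dec 11 2002') AS setup_time,
       pick_id('conf-123', 'call-456')                AS call_id;
```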
+
+
+
+/*
+ * Table structure for 'isdn_error_codes' table
+ *
+ * Taken from cisco.com this data can be JOINED against h323DisconnectCause to
+ * give human readable error reports.
+ *
+ */
+
+
+CREATE TABLE isdn_error_codes (
+	error_code	VARCHAR(2) PRIMARY KEY,
+	desc_short	VARCHAR(90),
+	desc_long	TEXT
+);
+
+/*
+ * Data for 'isdn_error_codes' table
+ */
+
+INSERT INTO isdn_error_codes VALUES ('1', 'Unallocated (unassigned) number', 'The ISDN number was sent to the switch in the correct format; however, the number is not assigned to any destination equipment.');
+INSERT INTO isdn_error_codes VALUES ('10', 'Normal call clearing', 'Normal call clearing has occurred.');
+INSERT INTO isdn_error_codes VALUES ('11', 'User busy', 'The called system acknowledges the connection request but is unable to accept the call because all B channels are in use.');
+INSERT INTO isdn_error_codes VALUES ('12', 'No user responding', 'The connection cannot be completed because the destination does not respond to the call.');
+INSERT INTO isdn_error_codes VALUES ('13', 'No answer from user (user alerted)', 'The destination responds to the connection request but fails to complete the connection within the prescribed time. The problem is at the remote end of the connection.');
+INSERT INTO isdn_error_codes VALUES ('15', 'Call rejected', 'The destination is capable of accepting the call but rejected the call for an unknown reason.');
+INSERT INTO isdn_error_codes VALUES ('16', 'Number changed', 'The ISDN number used to set up the call is not assigned to any system.');
+INSERT INTO isdn_error_codes VALUES ('1A', 'Non-selected user clearing', 'The destination is capable of accepting the call but rejected the call because it was not assigned to the user.');
+INSERT INTO isdn_error_codes VALUES ('1B', 'Destination out of order', 'The destination cannot be reached because the interface is not functioning correctly, and a signaling message cannot be delivered. This might be a temporary condition, but it could last for an extended period of time. For example, the remote equipment might be turned off.');
+INSERT INTO isdn_error_codes VALUES ('1C', 'Invalid number format', 'The connection could not be established because the destination address was presented in an unrecognizable format or because the destination address was incomplete.');
+INSERT INTO isdn_error_codes VALUES ('1D', 'Facility rejected', 'The facility requested by the user cannot be provided by the network.');
+INSERT INTO isdn_error_codes VALUES ('1E', 'Response to STATUS ENQUIRY', 'The status message was generated in direct response to the prior receipt of a status enquiry message.');
+INSERT INTO isdn_error_codes VALUES ('1F', 'Normal, unspecified', 'Reports the occurrence of a normal event when no standard cause applies. No action required.');
+INSERT INTO isdn_error_codes VALUES ('2', 'No route to specified transit network', 'The ISDN exchange is asked to route the call through an unrecognized intermediate network.');
+INSERT INTO isdn_error_codes VALUES ('22', 'No circuit/channel available', 'The connection cannot be established because no appropriate channel is available to take the call.');
+INSERT INTO isdn_error_codes VALUES ('26', 'Network out of order', 'The destination cannot be reached because the network is not functioning correctly, and the condition might last for an extended period of time. An immediate reconnect attempt will probably be unsuccessful.');
+INSERT INTO isdn_error_codes VALUES ('29', 'Temporary failure', 'An error occurred because the network is not functioning correctly. The problem will be resolved shortly.');
+INSERT INTO isdn_error_codes VALUES ('2A', 'Switching equipment congestion', 'The destination cannot be reached because the network switching equipment is temporarily overloaded.');
+INSERT INTO isdn_error_codes VALUES ('2B', 'Access information discarded', 'The network cannot provide the requested access information.');
+INSERT INTO isdn_error_codes VALUES ('2C', 'Requested circuit/channel not available', 'The remote equipment cannot provide the requested channel for an unknown reason. This might be a temporary problem.');
+INSERT INTO isdn_error_codes VALUES ('2F', 'Resources unavailable, unspecified', 'The requested channel or service is unavailable for an unknown reason. This might be a temporary problem.');
+INSERT INTO isdn_error_codes VALUES ('3', 'No route to destination', 'The call was routed through an intermediate network that does not serve the destination address.');
+INSERT INTO isdn_error_codes VALUES ('31', 'Quality of service unavailable', 'The requested quality of service cannot be provided by the network. This might be a subscription problem.');
+INSERT INTO isdn_error_codes VALUES ('32', 'Requested facility not subscribed', 'The remote equipment supports the requested supplementary service by subscription only.');
+INSERT INTO isdn_error_codes VALUES ('39', 'Bearer capability not authorized', 'The user requested a bearer capability that the network provides, but the user is not authorized to use it. This might be a subscription problem.');
+INSERT INTO isdn_error_codes VALUES ('3A', 'Bearer capability not presently available', 'The network normally provides the requested bearer capability, but it is unavailable at the present time. This might be due to a temporary network problem or to a subscription problem.');
+INSERT INTO isdn_error_codes VALUES ('3F', 'Service or option not available, unspecified', 'The network or remote equipment was unable to provide the requested service option for an unspecified reason. This might be a subscription problem.');
+INSERT INTO isdn_error_codes VALUES ('41', 'Bearer capability not implemented', 'The network cannot provide the bearer capability requested by the user.');
+INSERT INTO isdn_error_codes VALUES ('42', 'Channel type not implemented', 'The network or the destination equipment does not support the requested channel type.');
+INSERT INTO isdn_error_codes VALUES ('45', 'Requested facility not implemented', 'The remote equipment does not support the requested supplementary service.');
+INSERT INTO isdn_error_codes VALUES ('46', 'Only restricted digital information bearer capability is available', 'The network is unable to provide unrestricted digital information bearer capability.');
+INSERT INTO isdn_error_codes VALUES ('4F', 'Service or option not implemented, unspecified', 'The network or remote equipment is unable to provide the requested service option for an unspecified reason. This might be a subscription problem.');
+INSERT INTO isdn_error_codes VALUES ('51', 'Invalid call reference value', 'The remote equipment received a call with a call reference that is not currently in use on the user-network interface.');
+INSERT INTO isdn_error_codes VALUES ('52', 'Identified channel does not exist', 'The receiving equipment is requested to use a channel that is not activated on the interface for calls.');
+INSERT INTO isdn_error_codes VALUES ('53', 'A suspended call exists, but this call identity does not', 'The network received a call resume request. The call resume request contained a Call Identify information element that indicates that the call identity is being used for a suspended call.');
+INSERT INTO isdn_error_codes VALUES ('54', 'Call identity in use', 'The network received a call resume request. The call resume request contained a Call Identify information element that indicates that it is in use for a suspended call.');
+INSERT INTO isdn_error_codes VALUES ('55', 'No call suspended', 'The network received a call resume request when there was not a suspended call pending. This might be a transient error that will be resolved by successive call retries.');
+INSERT INTO isdn_error_codes VALUES ('56', 'Call having the requested call identity has been cleared', 'The network received a call resume request. The call resume request contained a Call Identity information element, which once indicated a suspended call. However, the suspended call was cleared either by timeout or by the remote user.');
+INSERT INTO isdn_error_codes VALUES ('58', 'Incompatible destination', 'Indicates that an attempt was made to connect to non-ISDN equipment. For example, to an analog line.');
+INSERT INTO isdn_error_codes VALUES ('5B', 'Invalid transit network selection', 'The ISDN exchange was asked to route the call through an unrecognized intermediate network.');
+INSERT INTO isdn_error_codes VALUES ('5F', 'Invalid message, unspecified', 'An invalid message was received, and no standard cause applies. This is usually due to a D-channel error. If this error occurs systematically, report it to your ISDN service provider.');
+INSERT INTO isdn_error_codes VALUES ('6', 'Channel unacceptable', 'The service quality of the specified channel is insufficient to accept the connection.');
+INSERT INTO isdn_error_codes VALUES ('60', 'Mandatory information element is missing', 'The receiving equipment received a message that did not include one of the mandatory information elements. This is usually due to a D-channel error. If this error occurs systematically, report it to your ISDN service provider.');
+INSERT INTO isdn_error_codes VALUES ('61', 'Message type non-existent or not implemented', 'The receiving equipment received an unrecognized message, either because the message type was invalid or because the message type was valid but not supported. The cause is due to either a problem with the remote configuration or a problem with the local D channel.');
+INSERT INTO isdn_error_codes VALUES ('62', 'Message not compatible with call state or message type non-existent or not implemented', 'The remote equipment received an invalid message, and no standard cause applies. This cause is due to a D-channel error. If this error occurs systematically, report it to your ISDN service provider.');
+INSERT INTO isdn_error_codes VALUES ('63', 'Information element non-existent or not implemented', 'The remote equipment received a message that includes information elements, which were not recognized. This is usually due to a D-channel error. If this error occurs systematically, report it to your ISDN service provider.');
+INSERT INTO isdn_error_codes VALUES ('64', 'Invalid information element contents', 'The remote equipment received a message that includes invalid information in the information element. This is usually due to a D-channel error.');
+INSERT INTO isdn_error_codes VALUES ('65', 'Message not compatible with call state', 'The remote equipment received an unexpected message that does not correspond to the current state of the connection. This is usually due to a D-channel error.');
+INSERT INTO isdn_error_codes VALUES ('66', 'Recovery on timer expires', 'An error-handling (recovery) procedure was initiated by a timer expiry. This is usually a temporary problem.');
+INSERT INTO isdn_error_codes VALUES ('6F', 'Protocol error, unspecified', 'An unspecified D-channel error when no other standard cause applies.');
+INSERT INTO isdn_error_codes VALUES ('7', 'Call awarded and being delivered in an established channel', 'The user is assigned an incoming call that is being connected to an already-established call channel.');
+INSERT INTO isdn_error_codes VALUES ('7F', 'Internetworking, unspecified', 'An event occurred, but the network does not provide causes for the action that it takes. The precise problem is unknown.');
+
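
As the comment above the isdn_error_codes table suggests, the cause data becomes useful when joined against the accounting tables; a sketch of such a report (table and column names follow this schema, the query itself is illustrative):

```
-- Human-readable disconnect reasons for recent telephony calls.
SELECT s.AcctTime, s.UserName, s.CallingStationId, e.desc_short
  FROM StopTelephony s
  JOIN isdn_error_codes e ON e.error_code = s.H323DisconnectCause
 ORDER BY s.AcctTime DESC;
```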
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/update_radacct_group.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/update_radacct_group.sql
new file mode 100644
index 0000000..270a0c9
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/update_radacct_group.sql
@@ -0,0 +1,37 @@
+/*
+ * $Id: 37f42a0b13515b09f9c7792e8a64b2a3b187e7a3 $
+ *
+ * OPTIONAL Postgresql trigger for FreeRADIUS
+ *
+ * This trigger fills in the groupname field (which doesn't come in Accounting packets)
+ * by querying the radusergroup table.
+ * This makes it easier to do group summary reports; however, note that it does add some extra
+ * database load to 50% of your SQL accounting queries. If you don't care about group summary
+ * reports then you don't need to install this.
+ *
+ */
+
+
+CREATE OR REPLACE FUNCTION upd_radgroups() RETURNS trigger AS'
+
+DECLARE
+	v_groupname varchar;
+
+BEGIN
+	SELECT INTO v_groupname GroupName FROM radusergroup WHERE CalledStationId = NEW.CalledStationId AND UserName = NEW.UserName;
+	IF FOUND THEN
+		UPDATE radacct SET GroupName = v_groupname WHERE RadAcctId = NEW.RadAcctId;
+	END IF;
+
+	RETURN NEW;
+END
+
+'LANGUAGE plpgsql;
+
+
+DROP TRIGGER upd_radgroups ON radacct;
+
+CREATE TRIGGER upd_radgroups AFTER INSERT ON radacct
+    FOR EACH ROW EXECUTE PROCEDURE upd_radgroups();
+
+
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/voip-postpaid.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/voip-postpaid.conf
new file mode 100644
index 0000000..6ae361d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/extras/voip-postpaid.conf
@@ -0,0 +1,70 @@
+# -*- text -*-
+##
+## voip-postpaid.conf -- PostgreSQL configuration for H323 VoIP billing
+##			 (cisco_h323_db_schema.sql)
+##
+##	$Id: 9f1449cc37d80e37025bdfd08fbd4d028aa0c800 $
+
+
+	#######################################################################
+	#  Query config:  Username
+	#######################################################################
+	# This is the username that will get substituted, escaped, and added
+	# as attribute 'SQL-User-Name'.  '%{SQL-User-Name}' should be used below
+	# everywhere a username substitution is needed so you can be sure
+	# the username passed from the client is escaped properly.
+	#
+	#  Uncomment the next line, if you want the sql_user_name to mean:
+	#
+	#    Use Stripped-User-Name, if it's there.
+	#    Else use User-Name, if it's there,
+	#    Else use hard-coded string "none" as the user name.
+	#
+	#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-none}}"
+	#
+	sql_user_name = "%{User-Name}"
+
+	accounting {
+		reference = "%{tolower:type.%{Acct-Status-Type}.query}"
+
+		# Write SQL queries to a logfile. This is potentially useful for bulk inserts
+		# when used with the rlm_sql_null driver.
+#		logfile = ${logdir}/accounting.sql
+
+		type {
+			start {
+				query = "INSERT INTO ${....acct_table1}%{h323-call-type} \
+						(RadiusServerName, UserName, NASIPAddress, AcctTime, CalledStationId, \
+						 CallingStationId, AcctDelayTime, h323gwid, h323callorigin, \
+						 h323setuptime, H323ConnectTime, callid) \
+					VALUES(\
+						'${radius_server_name}', '%{SQL-User-Name}', \
+						'%{NAS-IP-Address}', now(), '%{Called-Station-Id}', \
+						'%{Calling-Station-Id}', '%{%{Acct-Delay-Time}:-0}', '%{h323-gw-id}', \
+						'%{h323-call-origin}', strip_dot('%{h323-setup-time}'), \
+						strip_dot('%{h323-connect-time}'), pick_id('%{h323-conf-id}', \
+						'%{call-id}'))"
+			}
+
+			stop {
+				query = "INSERT INTO ${....acct_table2}%{h323-call-type} \
+						(RadiusServerName, UserName, NASIPAddress, AcctTime, \
+						 AcctSessionTime, AcctInputOctets, AcctOutputOctets, CalledStationId, \
+						 CallingStationId, AcctDelayTime, H323RemoteAddress, H323VoiceQuality, \
+						 CiscoNASPort, h323callorigin, callid, h323connecttime, \
+						 h323disconnectcause, h323disconnecttime, h323gwid, h323setuptime) \
+					VALUES(\
+						'${radius_server_name}', '%{SQL-User-Name}', '%{NAS-IP-Address}', \
+						NOW(),  '%{%{Acct-Session-Time}:-0}', \
+						'%{%{Acct-Input-Octets}:-0}', '%{%{Acct-Output-Octets}:-0}', \
+						'%{Called-Station-Id}', '%{Calling-Station-Id}', \
+						'%{%{Acct-Delay-Time}:-0}', NULLIF('%{h323-remote-address}', '')::inet, \
+						NULLIF('%{h323-voice-quality}','')::integer, \
+						NULLIF('%{Cisco-NAS-Port}', ''), \
+						'%{h323-call-origin}', pick_id('%{h323-conf-id}', '%{call-id}'), \
+						strip_dot('%{h323-connect-time}'), '%{h323-disconnect-cause}', \
+						strip_dot('%{h323-disconnect-time}'), '%{h323-gw-id}', \
+						strip_dot('%{h323-setup-time}'))"
+			}
+		}
+	}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/queries.conf
new file mode 100644
index 0000000..d5b61cf
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/queries.conf
@@ -0,0 +1,448 @@
+# -*- text -*-
+#
+#  main/postgresql/queries.conf -- PostgreSQL configuration for default schema (schema.sql)
+#
+#  $Id: 0f2a29afff36136bb171a9a97ee90199b017e46c $
+
+# Safe characters list for sql queries. Everything else is replaced
+# with their mime-encoded equivalents.
+# The default list should be ok
+# safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
+
+#######################################################################
+#  Query config:  Username
+#######################################################################
+# This is the username that will get substituted, escaped, and added
+# as attribute 'SQL-User-Name'.  '%{SQL-User-Name}' should be used
+# below everywhere a username substitution is needed so you can
+# be sure the username passed from the client is escaped properly.
+#
+# Uncomment the next line, if you want the sql_user_name to mean:
+#
+#    Use Stripped-User-Name, if it's there.
+#    Else use User-Name, if it's there,
+#    Else use hard-coded string "none" as the user name.
+#
+#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-none}}"
+
+sql_user_name = "%{User-Name}"
+
+#######################################################################
+#  Default profile
+#######################################################################
+# This is the default profile. It is found in SQL by group membership.
+# That means that this profile must be a member of at least one group
+# which will contain the corresponding check and reply items.
+# This profile will be queried in the authorize section for every user.
+# The point is to assign all users a default profile without having to
+# manually add each one to a group that will contain the profile.
+# The SQL module will also honor the User-Profile attribute. This
+# attribute can be set anywhere in the authorize section (ie the users
+# file). It is found exactly as the default profile is found.
+# If it is set then it will *overwrite* the default profile setting.
+# The idea is to select profiles based on checks on the incoming
+# packets, not on user group membership. For example:
+# -- users file --
+# DEFAULT	Service-Type == Outbound-User, User-Profile := "outbound"
+# DEFAULT	Service-Type == Framed-User, User-Profile := "framed"
+#
+# By default the default_user_profile is not set
+#
+# default_user_profile = "DEFAULT"
+
+#######################################################################
+#  Open Query
+#######################################################################
+# This query is run whenever a new connection is opened.
+# It is commented out by default.
+#
+# If you have issues with connections hanging for too long, uncomment
+# the next line, and set the timeout in milliseconds.  As a general
+# rule, if the queries take longer than a second, something is wrong
+# with the database.
+#open_query = "set statement_timeout to 1000"
+
+#######################################################################
+#  NAS Query
+#######################################################################
+#  This query retrieves the radius clients
+#
+#  0. Row ID (currently unused)
+#  1. Name (or IP address)
+#  2. Shortname
+#  3. Type
+#  4. Secret
+#  5. Server
+#######################################################################
+
+client_query = "\
+	SELECT id, nasname, shortname, type, secret, server \
+	FROM ${client_table}"
+
+#######################################################################
+#  Authorization Queries
+#######################################################################
+#  These queries compare the check items for the user
+#  in ${authcheck_table} and setup the reply items in
+#  ${authreply_table}.  You can use any query/tables
+#  you want, but the return data for each row MUST
+#  be in the  following order:
+#
+#  0. Row ID (currently unused)
+#  1. UserName/GroupName
+#  2. Item Attr Name
+#  3. Item Attr Value
+#  4. Item Attr Operation
+#######################################################################
+
+#
+#  Use these for case insensitive usernames. WARNING: Slower queries!
+#
+#authorize_check_query = "\
+#	SELECT id, UserName, Attribute, Value, Op \
+#	FROM ${authcheck_table} \
+#	WHERE LOWER(UserName) = LOWER('%{SQL-User-Name}') \
+#	ORDER BY id"
+
+#authorize_reply_query = "\
+#	SELECT id, UserName, Attribute, Value, Op \
+#	FROM ${authreply_table} \
+#	WHERE LOWER(UserName) = LOWER('%{SQL-User-Name}') \
+#	ORDER BY id"
+
+authorize_check_query = "\
+	SELECT id, UserName, Attribute, Value, Op \
+	FROM ${authcheck_table} \
+	WHERE Username = '%{SQL-User-Name}' \
+	ORDER BY id"
+
+authorize_reply_query = "\
+	SELECT id, UserName, Attribute, Value, Op \
+	FROM ${authreply_table} \
+	WHERE Username = '%{SQL-User-Name}' \
+	ORDER BY id"
+
+#
+#  Use these for case insensitive usernames. WARNING: Slower queries!
+#
+#authorize_group_check_query = "\
+#	SELECT \
+#		${groupcheck_table}.id, ${groupcheck_table}.GroupName, ${groupcheck_table}.Attribute, \
+#		${groupcheck_table}.Value, ${groupcheck_table}.Op \
+#	FROM ${groupcheck_table}, ${usergroup_table} \
+#	WHERE LOWER(${usergroup_table}.UserName) = LOWER('%{SQL-User-Name}') \
+#	AND ${usergroup_table}.GroupName = ${groupcheck_table}.GroupName \
+#	ORDER BY ${groupcheck_table}.id"
+
+#authorize_group_reply_query = "\
+#	SELECT \
+#		${groupreply_table}.id, ${groupreply_table}.GroupName, \
+#		${groupreply_table}.Attribute, ${groupreply_table}.Value, ${groupreply_table}.Op \
+#	FROM ${groupreply_table}, ${usergroup_table} \
+#	WHERE LOWER(${usergroup_table}.UserName) = LOWER('%{SQL-User-Name}') \
+#	AND ${usergroup_table}.GroupName = ${groupreply_table}.GroupName \
+#	ORDER BY ${groupreply_table}.id"
+
+authorize_group_check_query = "\
+	SELECT id, GroupName, Attribute, Value, op \
+	FROM ${groupcheck_table} \
+	WHERE GroupName = '%{Sql-Group}' \
+	ORDER BY id"
+
+authorize_group_reply_query = "\
+	SELECT id, GroupName, Attribute, Value, op \
+	FROM ${groupreply_table} \
+	WHERE GroupName = '%{Sql-Group}' \
+	ORDER BY id"
+
+#######################################################################
+# Simultaneous Use Checking Queries
+#######################################################################
+# simul_count_query     - query for the number of current connections
+#                       - If this is not defined, no simultaneous use checking
+#                       - will be performed by this module instance
+# simul_verify_query    - query to return details of current connections for verification
+#                       - Leave blank or commented out to disable verification step
+#                       - Note that the returned field order should not be changed.
+#######################################################################
+
+#
+#  Uncomment simul_count_query to enable simultaneous use checking
+#
+#simul_count_query = "\
+#	SELECT COUNT(*) \
+#	FROM ${acct_table1} \
+#	WHERE UserName='%{SQL-User-Name}' \
+#	AND AcctStopTime IS NULL"
+
+#simul_verify_query = "\
+#	SELECT RadAcctId, AcctSessionId, UserName, NASIPAddress, NASPortId, FramedIPAddress, CallingStationId, \
+#		FramedProtocol \
+#	FROM ${acct_table1} \
+#	WHERE UserName='%{SQL-User-Name}' \
+#	AND AcctStopTime IS NULL"
+
+#######################################################################
+# Group Membership Queries
+#######################################################################
+# group_membership_query        - Check user group membership
+#######################################################################
+
+# Use these for case insensitive usernames. WARNING: Slower queries!
+#group_membership_query = "\
+#	SELECT GroupName \
+#	FROM ${usergroup_table} \
+#	WHERE LOWER(UserName) = LOWER('%{SQL-User-Name}') \
+#	ORDER BY priority"
+
+group_membership_query = "\
+	SELECT GroupName \
+	FROM ${usergroup_table} \
+	WHERE UserName='%{SQL-User-Name}' \
+	ORDER BY priority"
+
+#######################################################################
+# Accounting and Post-Auth Queries
+#######################################################################
+# These queries insert/update accounting and authentication records.
+# The query to use is determined by the value of 'reference'.
+# This value is used as a configuration path and should resolve to one
+# or more 'query's. If reference points to multiple queries, and a query
+# fails, the next query is executed.
+#
+# Behaviour is identical to the old 1.x/2.x module, except we can now
+# fail between N queries, and query selection can be based on any
+# combination of attributes, or custom 'Acct-Status-Type' values.
+#######################################################################
+accounting {
+	reference = "%{tolower:type.%{%{Acct-Status-Type}:-none}.query}"
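+	#
+	#  For example, for an Accounting-Request with Acct-Status-Type = Start,
+	#  the reference above expands to "type.start.query", so the queries in
+	#  the type { start { ... } } subsection below are tried in order until
+	#  one succeeds.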
+
+	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
+	# when used with the rlm_sql_null driver.
+#	logfile = ${logdir}/accounting.sql
+
+	column_list = "\
+		AcctSessionId,		AcctUniqueId,		UserName, \
+		Realm,			NASIPAddress,		NASPortId, \
+		NASPortType,		AcctStartTime,		AcctUpdateTime, \
+		AcctStopTime,		AcctSessionTime, 	AcctAuthentic, \
+		ConnectInfo_start,	ConnectInfo_Stop, 	AcctInputOctets, \
+		AcctOutputOctets,	CalledStationId, 	CallingStationId, \
+		AcctTerminateCause,	ServiceType,		FramedProtocol, \
+		FramedIpAddress"
+
+	type {
+		accounting-on {
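+			#
+			#  Bulk-close any sessions still marked open for a NAS
+			#  that has just sent Accounting-On (i.e. it rebooted and
+			#  lost its session state).
+			#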
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					AcctStopTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					AcctUpdateTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					AcctSessionTime = (%{integer:Event-Timestamp} - EXTRACT(EPOCH FROM(AcctStartTime))), \
+					AcctTerminateCause = '%{%{Acct-Terminate-Cause}:-NAS-Reboot}' \
+				WHERE AcctStopTime IS NULL \
+				AND NASIPAddress= '%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}' \
+				AND AcctStartTime <= '%S'::timestamp"
+		}
+
+		accounting-off {
+			query = "${..accounting-on.query}"
+		}
+
+		start {
+			query = "\
+				INSERT INTO ${....acct_table1} \
+					(${...column_list}) \
+				VALUES(\
+					'%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					NULLIF('%{Realm}', ''), \
+					'%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}', \
+					%{%{NAS-Port}:-NULL}, \
+					'%{NAS-Port-Type}', \
+					TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					NULL, \
+					0, \
+					'%{Acct-Authentic}', \
+					'%{Connect-Info}', \
+					NULL, \
+					0, \
+					0, \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					NULL, \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					NULLIF('%{Framed-IP-Address}', '')::inet)"
+
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					AcctStartTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					AcctUpdateTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					ConnectInfo_start = '%{Connect-Info}' \
+				WHERE AcctSessionId = '%{Acct-Session-Id}' \
+				AND UserName = '%{SQL-User-Name}' \
+				AND NASIPAddress = '%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}' \
+				AND AcctStopTime IS NULL"
+
+			# and again where we don't have "AND AcctStopTime IS NULL"
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					AcctStartTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					AcctUpdateTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					ConnectInfo_start = '%{Connect-Info}' \
+				WHERE AcctSessionId = '%{Acct-Session-Id}' \
+				AND UserName = '%{SQL-User-Name}' \
+				AND NASIPAddress = '%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}'"
+		}
+
+		interim-update {
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					FramedIPAddress = NULLIF('%{Framed-IP-Address}', '')::inet, \
+					AcctSessionTime = %{%{Acct-Session-Time}:-NULL}, \
+					AcctInterval = (%{integer:Event-Timestamp} - EXTRACT(EPOCH FROM (COALESCE(AcctUpdateTime, AcctStartTime)))), \
+					AcctUpdateTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					AcctInputOctets = (('%{%{Acct-Input-Gigawords}:-0}'::bigint << 32) + \
+						'%{%{Acct-Input-Octets}:-0}'::bigint), \
+					AcctOutputOctets = (('%{%{Acct-Output-Gigawords}:-0}'::bigint << 32) + \
+						'%{%{Acct-Output-Octets}:-0}'::bigint) \
+				WHERE AcctSessionId = '%{Acct-Session-Id}' \
+				AND UserName = '%{SQL-User-Name}' \
+				AND NASIPAddress= '%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}' \
+				AND AcctStopTime IS NULL"
+
+			query = "\
+				INSERT INTO ${....acct_table1} \
+					(${...column_list}) \
+				VALUES(\
+					'%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					NULLIF('%{Realm}', ''), \
+					'%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}', \
+					%{%{NAS-Port}:-NULL}, \
+					'%{NAS-Port-Type}', \
+					TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					NULL, \
+					%{%{Acct-Session-Time}:-NULL}, \
+					'%{Acct-Authentic}', \
+					'%{Connect-Info}', \
+					NULL, \
+					(('%{%{Acct-Input-Gigawords}:-0}'::bigint << 32) + \
+						'%{%{Acct-Input-Octets}:-0}'::bigint), \
+					(('%{%{Acct-Output-Gigawords}:-0}'::bigint << 32) + \
+						'%{%{Acct-Output-Octets}:-0}'::bigint), \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					NULL, \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					NULLIF('%{Framed-IP-Address}', '')::inet)"
+		}
+
+		stop {
+			query = "\
+				UPDATE ${....acct_table2} \
+				SET \
+					AcctStopTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					AcctUpdateTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					AcctSessionTime = COALESCE(%{%{Acct-Session-Time}:-NULL}, \
+						(%{integer:Event-Timestamp} - EXTRACT(EPOCH FROM(AcctStartTime)))), \
+					AcctInputOctets = (('%{%{Acct-Input-Gigawords}:-0}'::bigint << 32) + \
+						'%{%{Acct-Input-Octets}:-0}'::bigint), \
+					AcctOutputOctets = (('%{%{Acct-Output-Gigawords}:-0}'::bigint << 32) + \
+						'%{%{Acct-Output-Octets}:-0}'::bigint), \
+					AcctTerminateCause = '%{Acct-Terminate-Cause}', \
+					FramedIPAddress = NULLIF('%{Framed-IP-Address}', '')::inet, \
+					ConnectInfo_stop = '%{Connect-Info}' \
+				WHERE AcctSessionId = '%{Acct-Session-Id}' \
+				AND UserName = '%{SQL-User-Name}' \
+				AND NASIPAddress = '%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}' \
+				AND AcctStopTime IS NULL"
+
+			query = "\
+				INSERT INTO ${....acct_table1} \
+					(${...column_list}) \
+				VALUES(\
+					'%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					NULLIF('%{Realm}', ''), \
+					'%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}', \
+					%{%{NAS-Port}:-NULL}, \
+					'%{NAS-Port-Type}', \
+					TO_TIMESTAMP(%{integer:Event-Timestamp} - %{%{Acct-Session-Time}:-0}), \
+					TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					NULLIF('%{Acct-Session-Time}', '')::bigint, \
+					'%{Acct-Authentic}', \
+					'%{Connect-Info}', \
+					NULL, \
+					(('%{%{Acct-Input-Gigawords}:-0}'::bigint << 32) + \
+						'%{%{Acct-Input-Octets}:-0}'::bigint), \
+					(('%{%{Acct-Output-Gigawords}:-0}'::bigint << 32) + \
+						'%{%{Acct-Output-Octets}:-0}'::bigint), \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'%{Acct-Terminate-Cause}', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					NULLIF('%{Framed-IP-Address}', '')::inet)"
+
+			# and again where we don't have "AND AcctStopTime IS NULL"
+			query = "\
+				UPDATE ${....acct_table2} \
+				SET \
+					AcctStopTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					AcctUpdateTime = TO_TIMESTAMP(%{integer:Event-Timestamp}), \
+					AcctSessionTime = COALESCE(%{%{Acct-Session-Time}:-NULL}, \
+						(%{integer:Event-Timestamp} - EXTRACT(EPOCH FROM(AcctStartTime)))), \
+					AcctInputOctets = (('%{%{Acct-Input-Gigawords}:-0}'::bigint << 32) + \
+						'%{%{Acct-Input-Octets}:-0}'::bigint), \
+					AcctOutputOctets = (('%{%{Acct-Output-Gigawords}:-0}'::bigint << 32) + \
+						'%{%{Acct-Output-Octets}:-0}'::bigint), \
+					AcctTerminateCause = '%{Acct-Terminate-Cause}', \
+					FramedIPAddress = NULLIF('%{Framed-IP-Address}', '')::inet, \
+					ConnectInfo_stop = '%{Connect-Info}' \
+				WHERE AcctSessionId = '%{Acct-Session-Id}' \
+				AND UserName = '%{SQL-User-Name}' \
+				AND NASIPAddress = '%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}}'"
+		}
+
+		#
+		#  No Acct-Status-Type == ignore the packet
+		#
+		none {
+		     query = "SELECT true"
+		}
+	}
+}
+
+
+#######################################################################
+# Authentication Logging Queries
+#######################################################################
+# postauth_query                - Insert some info after authentication
+#######################################################################
+
+post-auth {
+	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
+	# when used with the rlm_sql_null driver.
+#	logfile = ${logdir}/post-auth.sql
+
+	query = "\
+		INSERT INTO ${..postauth_table} \
+			(username, pass, reply, authdate) \
+		VALUES(\
+			'%{User-Name}', \
+			'%{%{User-Password}:-%{Chap-Password}}', \
+			'%{reply:Packet-Type}', \
+			NOW())"
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/schema.sql
new file mode 100644
index 0000000..5d7b439
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/schema.sql
@@ -0,0 +1,178 @@
+/*
+ * $Id: 5ab9d29306cbef460fe310aafd5d2046267cbbdc $
+ *
+ * Postgresql schema for FreeRADIUS
+ *
+ * All field lengths need checking as some are still suboptimal. -pnixon 2003-07-13
+ *
+ */
+
+/*
+ * Table structure for table 'radacct'
+ *
+ * Note: Column type bigserial does not exist prior to Postgres 7.2
+ *       If you run an older version you need to change this to serial
+ */
+CREATE TABLE radacct (
+	RadAcctId		bigserial PRIMARY KEY,
+	AcctSessionId		text NOT NULL,
+	AcctUniqueId		text NOT NULL UNIQUE,
+	UserName		text,
+	GroupName		text,
+	Realm			text,
+	NASIPAddress		inet NOT NULL,
+	NASPortId		text,
+	NASPortType		text,
+	AcctStartTime		timestamp with time zone,
+	AcctUpdateTime		timestamp with time zone,
+	AcctStopTime		timestamp with time zone,
+	AcctInterval		bigint,
+	AcctSessionTime		bigint,
+	AcctAuthentic		text,
+	ConnectInfo_start	text,
+	ConnectInfo_stop	text,
+	AcctInputOctets		bigint,
+	AcctOutputOctets	bigint,
+	CalledStationId		text,
+	CallingStationId	text,
+	AcctTerminateCause	text,
+	ServiceType		text,
+	FramedProtocol		text,
+	FramedIPAddress		inet
+);
+-- This index may be useful..
+-- CREATE UNIQUE INDEX radacct_whoson on radacct (AcctStartTime, nasipaddress);
+
+-- For use by update-, stop- and simul_* queries
+CREATE INDEX radacct_active_user_idx ON radacct (AcctSessionId, UserName, NASIPAddress) WHERE AcctStopTime IS NULL;
+
+-- For use by onoff-
+create INDEX radacct_bulk_close ON radacct (NASIPAddress, AcctStartTime) WHERE AcctStopTime IS NULL;
+
+-- and for common statistic queries:
+CREATE INDEX radacct_start_user_idx ON radacct (AcctStartTime, UserName);
+
+-- and, optionally
+-- CREATE INDEX radacct_stop_user_idx ON radacct (acctStopTime, UserName);
+
+/*
+ * There were WAAAY too many indexes previously. This combo index
+ * should take care of the most common searches.
+ * I have commented out all the old indexes, but left them in case
+ * someone wants them. I don't recommend anyone use them all at once
+ * as they will slow down your DB too much.
+ *  - pnixon 2003-07-13
+ */
+
+/*
+ * create index radacct_UserName on radacct (UserName);
+ * create index radacct_AcctSessionId on radacct (AcctSessionId);
+ * create index radacct_AcctUniqueId on radacct (AcctUniqueId);
+ * create index radacct_FramedIPAddress on radacct (FramedIPAddress);
+ * create index radacct_NASIPAddress on radacct (NASIPAddress);
+ * create index radacct_AcctStartTime on radacct (AcctStartTime);
+ * create index radacct_AcctStopTime on radacct (AcctStopTime);
+*/
+
+
+
+/*
+ * Table structure for table 'radcheck'
+ */
+CREATE TABLE radcheck (
+	id			serial PRIMARY KEY,
+	UserName		text NOT NULL DEFAULT '',
+	Attribute		text NOT NULL DEFAULT '',
+	op			VARCHAR(2) NOT NULL DEFAULT '==',
+	Value			text NOT NULL DEFAULT ''
+);
+create index radcheck_UserName on radcheck (UserName,Attribute);
+/*
+ * Use this index if you use case insensitive queries
+ */
+-- create index radcheck_UserName_lower on radcheck (lower(UserName),Attribute);
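+
+/*
+ * A hypothetical example row (not part of the schema itself): a test
+ * supplicant that authenticates with a cleartext password.
+ *
+ * INSERT INTO radcheck (UserName, Attribute, op, Value)
+ *	VALUES ('user1@example.com', 'Cleartext-Password', ':=', 'password');
+ */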
+
+/*
+ * Table structure for table 'radgroupcheck'
+ */
+CREATE TABLE radgroupcheck (
+	id			serial PRIMARY KEY,
+	GroupName		text NOT NULL DEFAULT '',
+	Attribute		text NOT NULL DEFAULT '',
+	op			VARCHAR(2) NOT NULL DEFAULT '==',
+	Value			text NOT NULL DEFAULT ''
+);
+create index radgroupcheck_GroupName on radgroupcheck (GroupName,Attribute);
+
+/*
+ * Table structure for table 'radgroupreply'
+ */
+CREATE TABLE radgroupreply (
+	id			serial PRIMARY KEY,
+	GroupName		text NOT NULL DEFAULT '',
+	Attribute		text NOT NULL DEFAULT '',
+	op			VARCHAR(2) NOT NULL DEFAULT '=',
+	Value			text NOT NULL DEFAULT ''
+);
+create index radgroupreply_GroupName on radgroupreply (GroupName,Attribute);
+
+/*
+ * Table structure for table 'radreply'
+ */
+CREATE TABLE radreply (
+	id			serial PRIMARY KEY,
+	UserName		text NOT NULL DEFAULT '',
+	Attribute		text NOT NULL DEFAULT '',
+	op			VARCHAR(2) NOT NULL DEFAULT '=',
+	Value			text NOT NULL DEFAULT ''
+);
+create index radreply_UserName on radreply (UserName,Attribute);
+/*
+ * Use this index if you use case insensitive queries
+ */
+-- create index radreply_UserName_lower on radreply (lower(UserName),Attribute);
+
+/*
+ * Table structure for table 'radusergroup'
+ */
+CREATE TABLE radusergroup (
+	id			serial PRIMARY KEY,
+	UserName		text NOT NULL DEFAULT '',
+	GroupName		text NOT NULL DEFAULT '',
+	priority		integer NOT NULL DEFAULT 0
+);
+create index radusergroup_UserName on radusergroup (UserName);
+/*
+ * Use this index if you use case insensitive queries
+ */
+-- create index radusergroup_UserName_lower on radusergroup (lower(UserName));
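+
+/*
+ * Hypothetical example (illustration only): place the same test user into
+ * a group whose check/reply items live in radgroupcheck/radgroupreply.
+ *
+ * INSERT INTO radusergroup (UserName, GroupName, priority)
+ *	VALUES ('user1@example.com', 'users', 1);
+ */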
+
+--
+-- Table structure for table 'radpostauth'
+--
+
+CREATE TABLE radpostauth (
+	id			bigserial PRIMARY KEY,
+	username		text NOT NULL,
+	pass			text,
+	reply			text,
+	CalledStationId		text,
+	CallingStationId	text,
+	authdate		timestamp with time zone NOT NULL default now()
+);
+
+/*
+ * Table structure for table 'nas'
+ */
+CREATE TABLE nas (
+	id			serial PRIMARY KEY,
+	nasname			text NOT NULL,
+	shortname		text NOT NULL,
+	type			text NOT NULL DEFAULT 'other',
+	ports			integer,
+	secret			text NOT NULL,
+	server			text,
+	community		text,
+	description		text
+);
+create index nas_nasname on nas (nasname);
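+
+/*
+ * Hypothetical example (all values are placeholders): register a RADIUS
+ * client so that client_query in queries.conf can pick it up.
+ *
+ * INSERT INTO nas (nasname, shortname, type, secret)
+ *	VALUES ('192.0.2.10', 'test-nas', 'other', 'testing123');
+ */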
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/setup.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/setup.sql
new file mode 100644
index 0000000..17157d0
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/postgresql/setup.sql
@@ -0,0 +1,32 @@
+/*
+ * admin.sql -- PostgreSQL commands for creating the RADIUS user.
+ *
+ *	WARNING: You should change 'localhost' and 'radpass'
+ *		 to something else.  Also update raddb/sql.conf
+ *		 with the new RADIUS password.
+ *
+ *	WARNING: This example file is untested.  Use at your own risk.
+ *		 Please send any bug fixes to the mailing list.
+ *
+ *	$Id: 26d08cae41c788321bdf8fd1b0c41a443b2da6f4 $
+ */
+
+/*
+ *  Create default administrator for RADIUS
+ */
+CREATE USER radius WITH PASSWORD 'radpass';
+
+/*
+ * The server can read any table in SQL
+ */
+GRANT SELECT ON radcheck TO radius;
+GRANT SELECT ON radreply TO radius;
+GRANT SELECT ON radgroupcheck TO radius;
+GRANT SELECT ON radgroupreply TO radius;
+GRANT SELECT ON radusergroup TO radius;
+
+/*
+ * The server can write to the accounting and post-auth logging table.
+ */
+GRANT SELECT, INSERT, UPDATE on radacct TO radius;
+GRANT SELECT, INSERT, UPDATE on radpostauth TO radius;
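+
+/*
+ * A minimal sketch (database name, user and paths are assumptions, not part
+ * of this repository) of how these files might be loaded:
+ *
+ *   createdb radius
+ *   psql -U postgres radius < schema.sql
+ *   psql -U postgres radius < setup.sql
+ */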
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/sqlite/queries.conf b/src/test/setup/radius-config/freeradius/mods-config/sql/main/sqlite/queries.conf
new file mode 100644
index 0000000..c91f543
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/sqlite/queries.conf
@@ -0,0 +1,397 @@
+# -*- text -*-
+#
+#  main/sqlite/queries.conf -- SQLite configuration for default schema (schema.sql)
+#
+#  $Id: e1e83bf94814ed8be6239977b7bacfed21c0cd6a $
+
+# Safe characters list for sql queries. Everything else is replaced
+# with their mime-encoded equivalents.
+# The default list should be ok
+#safe_characters = "@abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.-_: /"
+
+#######################################################################
+#  Query config:  Username
+#######################################################################
+# This is the username that will get substituted, escaped, and added
+# as attribute 'SQL-User-Name'. '%{SQL-User-Name}' should be used below
+# everywhere a username substitution is needed so you can be sure
+# the username passed from the client is escaped properly.
+#
+# Uncomment the next line, if you want the sql_user_name to mean:
+#
+#	Use Stripped-User-Name, if it's there.
+#	Else use User-Name, if it's there,
+#	Else use hard-coded string "DEFAULT" as the user name.
+#sql_user_name = "%{%{Stripped-User-Name}:-%{%{User-Name}:-DEFAULT}}"
+#
+sql_user_name = "%{User-Name}"
+
+#######################################################################
+# Default profile
+#######################################################################
+# This is the default profile. It is found in SQL by group membership.
+# That means that this profile must be a member of at least one group
+# which will contain the corresponding check and reply items.
+# This profile will be queried in the authorize section for every user.
+# The point is to assign all users a default profile without having to
+# manually add each one to a group that will contain the profile.
+# The SQL module will also honor the User-Profile attribute. This
+# attribute can be set anywhere in the authorize section (ie the users
+# file). It is found exactly as the default profile is found.
+# If it is set then it will *overwrite* the default profile setting.
+# The idea is to select profiles based on checks on the incoming packets,
+# not on user group membership. For example:
+# -- users file --
+# DEFAULT	Service-Type == Outbound-User, User-Profile := "outbound"
+# DEFAULT	Service-Type == Framed-User, User-Profile := "framed"
+#
+# By default the default_user_profile is not set
+#
+#default_user_profile = "DEFAULT"
+
+#######################################################################
+# NAS Query
+#######################################################################
+# This query retrieves the radius clients
+#
+# 0. Row ID (currently unused)
+# 1. Name (or IP address)
+# 2. Shortname
+# 3. Type
+# 4. Secret
+# 5. Server
+#######################################################################
+
+client_query = "\
+	SELECT id, nasname, shortname, type, secret, server \
+	FROM ${client_table}"
+
+#######################################################################
+# Authorization Queries
+#######################################################################
+# These queries compare the check items for the user
+# in ${authcheck_table} and set up the reply items in
+# ${authreply_table}. You can use any query/tables
+# you want, but the return data for each row MUST
+# be in the following order:
+#
+# 0. Row ID (currently unused)
+# 1. UserName/GroupName
+# 2. Item Attr Name
+# 3. Item Attr Value
+# 4. Item Attr Operation
+#######################################################################
+
+#
+#  Use these for case sensitive usernames.
+#
+#authorize_check_query = "\
+#	SELECT id, username, attribute, value, op \
+#	FROM ${authcheck_table} \
+#	WHERE username = BINARY '%{SQL-User-Name}' \
+#	ORDER BY id"
+
+#authorize_reply_query = "\
+#	SELECT id, username, attribute, value, op \
+#	FROM ${authreply_table} \
+#	WHERE username = BINARY '%{SQL-User-Name}' \
+#	ORDER BY id"
+
+#
+#  The default queries are case insensitive. (for compatibility with older versions of FreeRADIUS)
+#
+authorize_check_query = "\
+	SELECT id, username, attribute, value, op \
+	FROM ${authcheck_table} \
+	WHERE username = '%{SQL-User-Name}' \
+	ORDER BY id"
+
+authorize_reply_query = "\
+	SELECT id, username, attribute, value, op \
+	FROM ${authreply_table} \
+	WHERE username = '%{SQL-User-Name}' \
+	ORDER BY id"
+
+#
+# Use these for case sensitive usernames.
+#
+#group_membership_query = "\
+#	SELECT groupname \
+#	FROM ${usergroup_table} \
+#	WHERE username = BINARY '%{SQL-User-Name}' \
+#	ORDER BY priority"
+
+group_membership_query = "\
+	SELECT groupname \
+	FROM ${usergroup_table} \
+	WHERE username = '%{SQL-User-Name}' \
+	ORDER BY priority"
+
+authorize_group_check_query = "\
+	SELECT id, groupname, attribute, \
+	Value, op \
+	FROM ${groupcheck_table} \
+	WHERE groupname = '%{Sql-Group}' \
+	ORDER BY id"
+
+authorize_group_reply_query = "\
+	SELECT id, groupname, attribute, \
+	value, op \
+	FROM ${groupreply_table} \
+	WHERE groupname = '%{Sql-Group}' \
+	ORDER BY id"
+
+#######################################################################
+# Simultaneous Use Checking Queries
+#######################################################################
+# simul_count_query	- query for the number of current connections
+#			- If this is not defined, no simultaneous use checking
+#			- will be performed by this module instance
+# simul_verify_query	- query to return details of current connections
+#				for verification
+#			- Leave blank or commented out to disable verification step
+#			- Note that the returned field order should not be changed.
+#######################################################################
+
+#
+#  Uncomment simul_count_query to enable simultaneous use checking
+#
+#simul_count_query = "\
+#	SELECT COUNT(*) \
+#	FROM ${acct_table1} \
+#	WHERE username = '%{SQL-User-Name}' \
+#	AND acctstoptime IS NULL"
+
+simul_verify_query = "\
+	SELECT radacctid, acctsessionid, username, nasipaddress, nasportid, framedipaddress, \
+		callingstationid, framedprotocol \
+	FROM ${acct_table1} \
+	WHERE username = '%{SQL-User-Name}' \
+	AND acctstoptime IS NULL"
+
+#######################################################################
+# Accounting and Post-Auth Queries
+#######################################################################
+# These queries insert/update accounting and authentication records.
+# The query to use is determined by the value of 'reference'.
+# This value is used as a configuration path and should resolve to one
+# or more 'query's. If reference points to multiple queries, and a query
+# fails, the next query is executed.
+#
+# Behaviour is identical to the old 1.x/2.x module, except we can now
+# fail between N queries, and query selection can be based on any
+# combination of attributes, or custom 'Acct-Status-Type' values.
+#######################################################################
+accounting {
+	reference = "%{tolower:type.%{Acct-Status-Type}.query}"
+
+	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
+	# when used with the rlm_sql_null driver.
+#	logfile = ${logdir}/accounting.sql
+
+	column_list = "\
+		acctsessionid,		acctuniqueid,		username, \
+		realm,			nasipaddress,		nasportid, \
+		nasporttype,		acctstarttime,		acctupdatetime, \
+		acctstoptime,		acctsessiontime, 	acctauthentic, \
+		connectinfo_start,	connectinfo_stop, 	acctinputoctets, \
+		acctoutputoctets,	calledstationid, 	callingstationid, \
+		acctterminatecause,	servicetype,		framedprotocol, \
+		framedipaddress"
+
+	type {
+		accounting-on {
+			#
+			#  Bulk terminate all sessions associated with a given NAS
+			#
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					acctstoptime = %{%{integer:Event-Timestamp}:-date('now')}, \
+					acctsessiontime = (\
+						%{%{integer:Event-Timestamp}:-strftime('%s', 'now')} \
+						- strftime('%s', acctstarttime)), \
+					acctterminatecause = '%{Acct-Terminate-Cause}' \
+				WHERE acctstoptime IS NULL \
+				AND nasipaddress   = '%{NAS-IP-Address}' \
+				AND acctstarttime <= %{integer:Event-Timestamp}"
+		}
+
+		accounting-off {
+			query = "${..accounting-on.query}"
+		}
+
+		start {
+			#
+			#  Insert a new record into the sessions table
+			#
+			query = "\
+				INSERT INTO ${....acct_table1} \
+					(${...column_list}) \
+				VALUES \
+					('%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					'%{Realm}', \
+					'%{NAS-IP-Address}', \
+					'%{NAS-Port}', \
+					'%{NAS-Port-Type}', \
+					%{%{integer:Event-Timestamp}:-date('now')}, \
+					%{%{integer:Event-Timestamp}:-date('now')}, \
+					NULL, \
+					'0', \
+					'%{Acct-Authentic}', \
+					'%{Connect-Info}', \
+					'', \
+					'0', \
+					'0', \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					'%{Framed-IP-Address}')"
+
+			#
+			#  Key constraints prevented us from inserting a new session,
+			#  use the alternate query to update an existing session.
+			#
+			query = "\
+				UPDATE ${....acct_table1} SET \
+					acctstarttime	= %{%{integer:Event-Timestamp}:-date('now')}, \
+					acctupdatetime	= %{%{integer:Event-Timestamp}:-date('now')}, \
+					connectinfo_start = '%{Connect-Info}' \
+				WHERE acctsessionid = '%{Acct-Session-Id}' \
+				AND username		= '%{SQL-User-Name}' \
+				AND nasipaddress	= '%{NAS-IP-Address}'"
+		}
+
+		interim-update {
+			#
+			#  Update an existing session and calculate the interval
+			#  between the last data we received for the session and this
+			#  update. This can be used to find stale sessions.
+			#
+			query = "\
+				UPDATE ${....acct_table1} \
+				SET \
+					acctupdatetime  = %{%{integer:Event-Timestamp}:-date('now')}, \
+					acctinterval    = 0, \
+					framedipaddress = '%{Framed-IP-Address}', \
+					acctsessiontime = '%{Acct-Session-Time}', \
+					acctinputoctets = %{%{Acct-Input-Gigawords}:-0} \
+						<< 32 | %{%{Acct-Input-Octets}:-0}, \
+					acctoutputoctets = %{%{Acct-Output-Gigawords}:-0} \
+						<< 32 | %{%{Acct-Output-Octets}:-0} \
+				WHERE acctsessionid     = '%{Acct-Session-Id}' \
+				AND username            = '%{SQL-User-Name}' \
+				AND nasipaddress        = '%{NAS-IP-Address}'"
+
+			#
+			#  The update condition matched no existing sessions. Use
+			#  the values provided in the update to create a new session.
+			#
+			query = "\
+				INSERT INTO ${....acct_table1} \
+					(${...column_list}) \
+				VALUES \
+					('%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					'%{Realm}', \
+					'%{NAS-IP-Address}', \
+					'%{NAS-Port}', \
+					'%{NAS-Port-Type}', \
+					(%{%{integer:Event-Timestamp}:-strftime('%s', 'now')} - %{%{Acct-Session-Time}:-0}), \
+					%{%{integer:Event-Timestamp}:-date('now')}, \
+					NULL, \
+					'%{Acct-Session-Time}', \
+					'%{Acct-Authentic}', \
+					'%{Connect-Info}', \
+					'', \
+					%{%{Acct-Input-Gigawords}:-0} << 32 | \
+						%{%{Acct-Input-Octets}:-0}, \
+					%{%{Acct-Output-Gigawords}:-0} << 32 | \
+						%{%{Acct-Output-Octets}:-0}, \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					'%{Framed-IP-Address}')"
+		}
+
+		stop {
+			#
+			#  Session has terminated, update the stop time and statistics.
+			#
+			query = "\
+				UPDATE ${....acct_table2} SET \
+					acctstoptime	= %{%{integer:Event-Timestamp}:-date('now')}, \
+					acctsessiontime	= '%{Acct-Session-Time}', \
+					acctinputoctets	= %{%{Acct-Input-Gigawords}:-0} \
+						<< 32 | %{%{Acct-Input-Octets}:-0}, \
+					acctoutputoctets = %{%{Acct-Output-Gigawords}:-0} \
+						<< 32 | %{%{Acct-Output-Octets}:-0}, \
+					acctterminatecause = '%{Acct-Terminate-Cause}', \
+					connectinfo_stop = '%{Connect-Info}' \
+				WHERE acctsessionid 	= '%{Acct-Session-Id}' \
+				AND username		= '%{SQL-User-Name}' \
+				AND nasipaddress	= '%{NAS-IP-Address}'"
+
+			#
+			#  The update condition matched no existing sessions. Use
+			#  the values provided in the update to create a new session.
+			#
+			query = "\
+				INSERT INTO ${....acct_table2} \
+					(${...column_list}) \
+				VALUES \
+					('%{Acct-Session-Id}', \
+					'%{Acct-Unique-Session-Id}', \
+					'%{SQL-User-Name}', \
+					'%{Realm}', \
+					'%{NAS-IP-Address}', \
+					'%{NAS-Port}', \
+					'%{NAS-Port-Type}', \
+					(%{%{integer:Event-Timestamp}:-strftime('%s', 'now')} - %{%{Acct-Session-Time}:-0}), \
+					%{%{integer:Event-Timestamp}:-date('now')}, \
+					%{%{integer:Event-Timestamp}:-date('now')}, \
+					'%{Acct-Session-Time}', \
+					'%{Acct-Authentic}', \
+					'', \
+					'%{Connect-Info}', \
+					%{%{Acct-Input-Gigawords}:-0} << 32 | \
+						%{%{Acct-Input-Octets}:-0}, \
+					%{%{Acct-Output-Gigawords}:-0} << 32 | \
+						%{%{Acct-Output-Octets}:-0}, \
+					'%{Called-Station-Id}', \
+					'%{Calling-Station-Id}', \
+					'%{Acct-Terminate-Cause}', \
+					'%{Service-Type}', \
+					'%{Framed-Protocol}', \
+					'%{Framed-IP-Address}')"
+		}
+	}
+}
+
+#######################################################################
+# Authentication Logging Queries
+#######################################################################
+# postauth_query	- Insert some info after authentication
+#######################################################################
+
+post-auth {
+	# Write SQL queries to a logfile. This is potentially useful for bulk inserts
+	# when used with the rlm_sql_null driver.
+#	logfile = ${logdir}/post-auth.sql
+
+	query =	"\
+		INSERT INTO ${..postauth_table} \
+			(username, pass, reply, authdate) \
+		VALUES ( \
+			'%{SQL-User-Name}', \
+			'%{%{User-Password}:-%{Chap-Password}}', \
+			'%{reply:Packet-Type}', \
+			 %{%{integer:Event-Timestamp}:-date('now')})"
+}
diff --git a/src/test/setup/radius-config/freeradius/mods-config/sql/main/sqlite/schema.sql b/src/test/setup/radius-config/freeradius/mods-config/sql/main/sqlite/schema.sql
new file mode 100644
index 0000000..c2a671e
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/sql/main/sqlite/schema.sql
@@ -0,0 +1,137 @@
+-----------------------------------------------------------------------------
+-- $Id: 83a455e620e5ac9603c659697ce9c756c9ccddb1 $                 	   --
+--                                                                         --
+--  schema.sql                       rlm_sql - FreeRADIUS SQLite Module    --
+--                                                                         --
+--     Database schema for SQLite rlm_sql module                           --
+--                                                                         --
+--     To load:                                                            --
+--         sqlite3 radius.db < schema.sql                                  --
+--                                                                         --
+-----------------------------------------------------------------------------
+
+--
+-- Table structure for table 'radacct'
+--
+CREATE TABLE radacct (
+  radacctid bigint(21) PRIMARY KEY,
+  acctsessionid varchar(64) NOT NULL default '',
+  acctuniqueid varchar(32) NOT NULL default '',
+  username varchar(64) NOT NULL default '',
+  groupname varchar(64) NOT NULL default '',
+  realm varchar(64) default '',
+  nasipaddress varchar(15) NOT NULL default '',
+  nasportid varchar(15) default NULL,
+  nasporttype varchar(32) default NULL,
+  acctstarttime datetime NULL default NULL,
+  acctupdatetime datetime NULL default NULL,
+  acctstoptime datetime NULL default NULL,
+  acctinterval int(12) default NULL,
+  acctsessiontime int(12) default NULL,
+  acctauthentic varchar(32) default NULL,
+  connectinfo_start varchar(50) default NULL,
+  connectinfo_stop varchar(50) default NULL,
+  acctinputoctets bigint(20) default NULL,
+  acctoutputoctets bigint(20) default NULL,
+  calledstationid varchar(50) NOT NULL default '',
+  callingstationid varchar(50) NOT NULL default '',
+  acctterminatecause varchar(32) NOT NULL default '',
+  servicetype varchar(32) default NULL,
+  framedprotocol varchar(32) default NULL,
+  framedipaddress varchar(15) NOT NULL default ''
+);
+
+CREATE UNIQUE INDEX acctuniqueid ON radacct(acctuniqueid);
+CREATE INDEX username ON radacct(username);
+CREATE INDEX framedipaddress ON radacct (framedipaddress);
+CREATE INDEX acctsessionid ON radacct(acctsessionid);
+CREATE INDEX acctsessiontime ON radacct(acctsessiontime);
+CREATE INDEX acctstarttime ON radacct(acctstarttime);
+CREATE INDEX acctinterval ON radacct(acctinterval);
+CREATE INDEX acctstoptime ON radacct(acctstoptime);
+CREATE INDEX nasipaddress ON radacct(nasipaddress);
+
+--
+-- Table structure for table 'radcheck'
+--
+CREATE TABLE radcheck (
+  id int(11) PRIMARY KEY,
+  username varchar(64) NOT NULL default '',
+  attribute varchar(64)  NOT NULL default '',
+  op char(2) NOT NULL DEFAULT '==',
+  value varchar(253) NOT NULL default ''
+);
+CREATE INDEX check_username ON radcheck(username);
+
+--
+-- Table structure for table 'radgroupcheck'
+--
+CREATE TABLE radgroupcheck (
+  id int(11) PRIMARY KEY,
+  groupname varchar(64) NOT NULL default '',
+  attribute varchar(64)  NOT NULL default '',
+  op char(2) NOT NULL DEFAULT '==',
+  value varchar(253)  NOT NULL default ''
+);
+CREATE INDEX check_groupname ON radgroupcheck(groupname);
+
+--
+-- Table structure for table 'radgroupreply'
+--
+CREATE TABLE radgroupreply (
+  id int(11) PRIMARY KEY,
+  groupname varchar(64) NOT NULL default '',
+  attribute varchar(64)  NOT NULL default '',
+  op char(2) NOT NULL DEFAULT '=',
+  value varchar(253)  NOT NULL default ''
+);
+CREATE INDEX reply_groupname ON radgroupreply(groupname);
+
+--
+-- Table structure for table 'radreply'
+--
+CREATE TABLE radreply (
+  id int(11) PRIMARY KEY,
+  username varchar(64) NOT NULL default '',
+  attribute varchar(64) NOT NULL default '',
+  op char(2) NOT NULL DEFAULT '=',
+  value varchar(253) NOT NULL default ''
+);
+CREATE INDEX reply_username ON radreply(username);
+
+--
+-- Table structure for table 'radusergroup'
+--
+CREATE TABLE radusergroup (
+  username varchar(64) NOT NULL default '',
+  groupname varchar(64) NOT NULL default '',
+  priority int(11) NOT NULL default '1'
+);
+CREATE INDEX usergroup_username ON radusergroup(username);
+
+--
+-- Table structure for table 'radpostauth'
+--
+CREATE TABLE radpostauth (
+  id int(11) PRIMARY KEY,
+  username varchar(64) NOT NULL default '',
+  pass varchar(64) NOT NULL default '',
+  reply varchar(32) NOT NULL default '',
+  authdate timestamp NOT NULL
+);
+
+--
+-- Table structure for table 'nas'
+--
+CREATE TABLE nas (
+  id int(11) PRIMARY KEY,
+  nasname varchar(128) NOT NULL,
+  shortname varchar(32),
+  type varchar(30) DEFAULT 'other',
+  ports int(5),
+  secret varchar(60) DEFAULT 'secret' NOT NULL,
+  server varchar(64),
+  community varchar(50),
+  description varchar(200) DEFAULT 'RADIUS Client'
+);
+CREATE INDEX nasname ON nas(nasname);
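+
+--
+-- A minimal sketch (the database file name is an assumption) of how this
+-- schema might be loaded for a quick test:
+--
+--   sqlite3 radius.db < schema.sql
+--
+-- Alternatively, the sqlite driver section of mods-available/sql can
+-- typically point its 'bootstrap' item at this file so the database is
+-- created automatically on first start.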
diff --git a/src/test/setup/radius-config/freeradius/mods-config/unbound/default.conf b/src/test/setup/radius-config/freeradius/mods-config/unbound/default.conf
new file mode 100644
index 0000000..9aac368
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-config/unbound/default.conf
@@ -0,0 +1,2 @@
+server:
+ num-threads: 2
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/always b/src/test/setup/radius-config/freeradius/mods-enabled/always
new file mode 120000
index 0000000..2cc1029
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/always
@@ -0,0 +1 @@
+../mods-available/always
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/attr_filter b/src/test/setup/radius-config/freeradius/mods-enabled/attr_filter
new file mode 120000
index 0000000..400dfd1
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/attr_filter
@@ -0,0 +1 @@
+../mods-available/attr_filter
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/cache_eap b/src/test/setup/radius-config/freeradius/mods-enabled/cache_eap
new file mode 120000
index 0000000..22cfe44
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/cache_eap
@@ -0,0 +1 @@
+../mods-available/cache_eap
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/chap b/src/test/setup/radius-config/freeradius/mods-enabled/chap
new file mode 120000
index 0000000..6ccd392
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/chap
@@ -0,0 +1 @@
+../mods-available/chap
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/detail b/src/test/setup/radius-config/freeradius/mods-enabled/detail
new file mode 120000
index 0000000..ad00d0e
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/detail
@@ -0,0 +1 @@
+../mods-available/detail
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/dhcp b/src/test/setup/radius-config/freeradius/mods-enabled/dhcp
new file mode 120000
index 0000000..7b16f23
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/dhcp
@@ -0,0 +1 @@
+../mods-available/dhcp
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/digest b/src/test/setup/radius-config/freeradius/mods-enabled/digest
new file mode 120000
index 0000000..95d3d36
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/digest
@@ -0,0 +1 @@
+../mods-available/digest
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/dynamic_clients b/src/test/setup/radius-config/freeradius/mods-enabled/dynamic_clients
new file mode 120000
index 0000000..7b030ba
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/dynamic_clients
@@ -0,0 +1 @@
+../mods-available/dynamic_clients
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/eap b/src/test/setup/radius-config/freeradius/mods-enabled/eap
new file mode 120000
index 0000000..37bab92
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/eap
@@ -0,0 +1 @@
+../mods-available/eap
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/echo b/src/test/setup/radius-config/freeradius/mods-enabled/echo
new file mode 120000
index 0000000..a436e68
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/echo
@@ -0,0 +1 @@
+../mods-available/echo
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/exec b/src/test/setup/radius-config/freeradius/mods-enabled/exec
new file mode 120000
index 0000000..a42a481
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/exec
@@ -0,0 +1 @@
+../mods-available/exec
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/expiration b/src/test/setup/radius-config/freeradius/mods-enabled/expiration
new file mode 120000
index 0000000..340f641
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/expiration
@@ -0,0 +1 @@
+../mods-available/expiration
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/expr b/src/test/setup/radius-config/freeradius/mods-enabled/expr
new file mode 120000
index 0000000..64dd3ab
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/expr
@@ -0,0 +1 @@
+../mods-available/expr
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/files b/src/test/setup/radius-config/freeradius/mods-enabled/files
new file mode 120000
index 0000000..372bc86
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/files
@@ -0,0 +1 @@
+../mods-available/files
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/linelog b/src/test/setup/radius-config/freeradius/mods-enabled/linelog
new file mode 120000
index 0000000..d6acab4
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/linelog
@@ -0,0 +1 @@
+../mods-available/linelog
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/logintime b/src/test/setup/radius-config/freeradius/mods-enabled/logintime
new file mode 120000
index 0000000..99b698e
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/logintime
@@ -0,0 +1 @@
+../mods-available/logintime
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/mschap b/src/test/setup/radius-config/freeradius/mods-enabled/mschap
new file mode 120000
index 0000000..c7523de
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/mschap
@@ -0,0 +1 @@
+../mods-available/mschap
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/ntlm_auth b/src/test/setup/radius-config/freeradius/mods-enabled/ntlm_auth
new file mode 120000
index 0000000..3d68f67
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/ntlm_auth
@@ -0,0 +1 @@
+../mods-available/ntlm_auth
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/pap b/src/test/setup/radius-config/freeradius/mods-enabled/pap
new file mode 120000
index 0000000..07f986f
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/pap
@@ -0,0 +1 @@
+../mods-available/pap
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/passwd b/src/test/setup/radius-config/freeradius/mods-enabled/passwd
new file mode 120000
index 0000000..be64f8b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/passwd
@@ -0,0 +1 @@
+../mods-available/passwd
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/preprocess b/src/test/setup/radius-config/freeradius/mods-enabled/preprocess
new file mode 120000
index 0000000..266822a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/preprocess
@@ -0,0 +1 @@
+../mods-available/preprocess
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/radutmp b/src/test/setup/radius-config/freeradius/mods-enabled/radutmp
new file mode 120000
index 0000000..e3c390c
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/radutmp
@@ -0,0 +1 @@
+../mods-available/radutmp
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/realm b/src/test/setup/radius-config/freeradius/mods-enabled/realm
new file mode 120000
index 0000000..acc66be
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/realm
@@ -0,0 +1 @@
+../mods-available/realm
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/replicate b/src/test/setup/radius-config/freeradius/mods-enabled/replicate
new file mode 120000
index 0000000..b03d8de
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/replicate
@@ -0,0 +1 @@
+../mods-available/replicate
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/soh b/src/test/setup/radius-config/freeradius/mods-enabled/soh
new file mode 120000
index 0000000..af88216
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/soh
@@ -0,0 +1 @@
+../mods-available/soh
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/sql b/src/test/setup/radius-config/freeradius/mods-enabled/sql
new file mode 120000
index 0000000..7024c2d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/sql
@@ -0,0 +1 @@
+/etc/freeradius/mods-available/sql
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/sradutmp b/src/test/setup/radius-config/freeradius/mods-enabled/sradutmp
new file mode 120000
index 0000000..ac90674
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/sradutmp
@@ -0,0 +1 @@
+../mods-available/sradutmp
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/unix b/src/test/setup/radius-config/freeradius/mods-enabled/unix
new file mode 120000
index 0000000..599fdef
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/unix
@@ -0,0 +1 @@
+../mods-available/unix
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/unpack b/src/test/setup/radius-config/freeradius/mods-enabled/unpack
new file mode 120000
index 0000000..dad4563
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/unpack
@@ -0,0 +1 @@
+../mods-available/unpack
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/mods-enabled/utf8 b/src/test/setup/radius-config/freeradius/mods-enabled/utf8
new file mode 120000
index 0000000..7979255
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/mods-enabled/utf8
@@ -0,0 +1 @@
+../mods-available/utf8
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/panic.gdb b/src/test/setup/radius-config/freeradius/panic.gdb
new file mode 100644
index 0000000..3ae253a
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/panic.gdb
@@ -0,0 +1,4 @@
+info locals
+info args
+thread apply all bt full
+quit
diff --git a/src/test/setup/radius-config/freeradius/policy.d/accounting b/src/test/setup/radius-config/freeradius/policy.d/accounting
new file mode 100644
index 0000000..201f5e5
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/policy.d/accounting
@@ -0,0 +1,72 @@
+# We check for this prefix to determine whether the class
+# value was generated by this server.  It should be changed
+# so that it is globally unique.
+class_value_prefix = 'ai:'
+
+#
+#	Replacement for the old rlm_acct_unique module
+#
+acct_unique {
+	#
+	#  If we have a class attribute in the format
+	#  'auth_id:[0-9a-f]{32}' it'll have a local value
+	#  (defined by insert_acct_class), this ensures
+	#  uniqueness and suitability.
+	#
+	#  We could just use the Class attribute as
+	#  Acct-Unique-Session-Id, but this may cause problems
+	#  with NAS that carry Class values across
+	#  multiple linked sessions.  So we rehash class with
+	#  Acct-Session-ID to provide a truly unique session
+	#  identifier.
+	#
+	#  Using a Class/Session-ID combination is more robust
+	#  than using elements in the Accounting-Request,
+	#  which may be subject to change, such as
+	#  NAS-IP-Address, Client-IP-Address and
+	#  NAS-Port-ID/NAS-Port.
+	#
+	#  This policy should ensure that session data is not
+	#  affected if NAS IP addresses change, or the client
+	#  roams to a different 'port' whilst maintaining its
+	#  initial authentication session (Common in a
+	#  wireless environment).
+	#
+	if("%{string:Class}" =~ /${policy.class_value_prefix}([0-9a-f]{32})/i) {
+		update request {
+			Acct-Unique-Session-Id := "%{md5:%{1},%{Acct-Session-ID}}"
+		}
+	}
+
+	#
+	#  Not all devices respect RFC 2865 when dealing with
+	#  the class attribute, so be prepared to use the
+	#  older style of hashing scheme if a class attribute
+	#  is not included.
+	#
+	else {
+		update request {
+			Acct-Unique-Session-Id := "%{md5:%{User-Name},%{Acct-Session-ID},%{%{NAS-IPv6-Address}:-%{NAS-IP-Address}},%{NAS-Identifier},%{NAS-Port-ID},%{NAS-Port}}"
+		 }
+	}
+}
+
+#
+#	Insert a (hopefully unique) value into class
+#
+insert_acct_class {
+	update reply {
+		Class = "${policy.class_value_prefix}%{md5:%t,%I,%{Packet-Src-Port},%{Packet-Src-IP-Address},%{NAS-IP-Address},%{Calling-Station-ID},%{User-Name}}"
+	}
+}
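+
+#
+#  The Class value produced above is therefore of the form
+#  "ai:<32 hex digits>", which is exactly what the regexp in acct_unique
+#  matches on when the NAS echoes the Class back in accounting packets.
+#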
+
+#
+#	Merges Acct-[Input|Output]-Octets and Acct-[Input|Output]-Gigawords into Acct-[Input|Output]-Octets64
+#
+acct_counters64.preacct {
+	update request {
+		Acct-Input-Octets64 = "%{expr:(%{%{Acct-Input-Gigawords}:-0} * 4294967296) + %{%{Acct-Input-Octets}:-0}}"
+		Acct-Output-Octets64 = "%{expr:(%{%{Acct-Output-Gigawords}:-0} * 4294967296) + %{%{Acct-Output-Octets}:-0}}"
+	}
+}
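+
+#
+#  For example (illustrative numbers only): Acct-Input-Gigawords = 1 and
+#  Acct-Input-Octets = 1000 would yield
+#  Acct-Input-Octets64 = 1 * 4294967296 + 1000 = 4294968296.
+#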
+
diff --git a/src/test/setup/radius-config/freeradius/policy.d/canonicalization b/src/test/setup/radius-config/freeradius/policy.d/canonicalization
new file mode 100644
index 0000000..c1cb357
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/policy.d/canonicalization
@@ -0,0 +1,91 @@
+#
+#	Split User-Name in NAI format (RFC 4282) into components
+#
+#  This policy writes the Username and Domain portions of the
+#  NAI into the Stripped-User-Name and Stripped-User-Domain
+#  attributes.
+#
+#  The regular expression to do this is not strictly compliant
+#  with the standard, but it is not possible to write a
+#  compliant regexp without perl style regular expressions (or
+#  at least not a legible one).
+#
+nai_regexp = "^([^@]*)(@([-[:alnum:]]+\\.[-[:alnum:].]+))?$"
+
+split_username_nai {
+	if(User-Name =~ /${policy.nai_regexp}/){
+		update request {
+			Stripped-User-Name := "%{1}"
+			Stripped-User-Domain = "%{3}"
+		}
+
+		# If any of the expansions result in a null
+		# string, the update section may return
+		# something other than updated...
+		updated
+	}
+	else {
+		noop
+	}
+}
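+
+#
+#  For example, a User-Name of "bob@example.org" (a hypothetical value) ends
+#  up with Stripped-User-Name = "bob" and Stripped-User-Domain = "example.org";
+#  a bare "bob" also matches, leaving Stripped-User-Domain empty.
+#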
+
+#
+#  If called in post-proxy we modify the proxy-reply message
+#
+split_username_nai.post-proxy {
+	if(proxy-reply:User-Name =~ /${policy.nai_regexp}/){
+		update proxy-reply {
+			Stripped-User-Name := "%{1}"
+			Stripped-User-Domain = "%{3}"
+		}
+		updated
+	}
+	else {
+		noop
+	}
+}
+
+#
+#  Normalize the MAC Addresses in the Calling/Called-Station-Id
+#
+mac-addr-regexp = ([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})
+
+#
+#  Add "rewrite_called_station_id" in the "authorize" and
+#  "preacct" sections.
+#
+rewrite_called_station_id {
+	if(Called-Station-Id =~ /^${policy.mac-addr-regexp}(:(.+))?$/i) {
+		update request {
+			Called-Station-Id := "%{tolower:%{1}-%{2}-%{3}-%{4}-%{5}-%{6}}"
+		}
+
+		# SSID component?
+		if ("%{8}") {
+			update request {
+				Called-Station-SSID := "%{8}"
+			}
+		}
+		updated
+	}
+	else {
+		noop
+	}
+}
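+
+#
+#  For example, a Called-Station-Id of "00:1A:2B:3C:4D:5E:MySSID" (a
+#  hypothetical value) becomes "00-1a-2b-3c-4d-5e", and Called-Station-SSID
+#  is set to "MySSID".
+#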
+
+#
+#  Add "rewrite_calling_station_id" in the "authorize" and
+#  "preacct" sections.
+#
+rewrite_calling_station_id {
+	if(Calling-Station-Id =~ /^${policy.mac-addr-regexp}$/i) {
+		update request {
+			Calling-Station-Id := "%{tolower:%{1}-%{2}-%{3}-%{4}-%{5}-%{6}}"
+		}
+		updated
+	}
+	else {
+		noop
+	}
+}
+
diff --git a/src/test/setup/radius-config/freeradius/policy.d/control b/src/test/setup/radius-config/freeradius/policy.d/control
new file mode 100644
index 0000000..b056ed1
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/policy.d/control
@@ -0,0 +1,22 @@
+#
+#  If you want the server to pretend that it is dead,
+#  then use the "do_not_respond" policy.
+#
+do_not_respond {
+	update control {
+		Response-Packet-Type := Do-Not-Respond
+	}
+
+	handled
+}
+
+#
+#  Have the server accept the current request.
+#  Can only be called from authorize.
+#  Unlike calling the always module instance 'reject', the request will continue to be processed.
+#
+accept.authorize {
+	update control {
+		Auth-Type := accept
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/policy.d/cui b/src/test/setup/radius-config/freeradius/policy.d/cui
new file mode 100644
index 0000000..f0302b8
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/policy.d/cui
@@ -0,0 +1,129 @@
+#
+#  The following policies are for the Chargeable-User-Identity
+#  (CUI) configuration.
+#
+#  The policies below can be called as just 'cui' (not
+#  cui.authorize etc..)  from the various config sections.
+#
+
+#
+#  cui_hash_key definition
+#  This key protects CUI values against dictionary attacks; it should
+#  therefore be chosen as a "random" string and kept secret.
+#
+cui_hash_key = "changeme"
+
+#
+# cui_require_operator_name switch
+# If this is set to nonzero value then CUI will only be added
+# when a non-empty Operator-Name value is present in the request
+#
+cui_require_operator_name = "no"
+
+#
+#  The client indicates it can do CUI by sending a CUI attribute
+#  containing one zero byte.
+#  A non-empty value in Operator-Name can be an additional requirement.
+#  Normally CUI support is turned on only for such requests.
+#  CUI support can also be used for local clients which do not
+#  support CUI themselves: the server can simulate a CUI request by
+#  adding the missing NUL CUI value and the Operator-Name attribute.
+#  Clients which are supposed to get this treatment should be marked
+#  with the add_cui flag in clients.conf.
+#  We assume that local clients are marked in clients.conf with the
+#  add_cui flag, e.g.
+#  client xxxx {
+#    ...
+#    add_cui = yes
+#  }
+#
+cui.authorize {
+	if ("%{client:add_cui}" == 'yes') {
+		update request {
+			Chargeable-User-Identity := '\\000'
+		}
+	}
+}
+
+#
+#  Before proxying an Access-Request to a remote server, a NUL CUI
+#  attribute should be added, unless it is already present in the request.
+#
+cui.pre-proxy {
+	if (("%{request:Packet-Type}" == 'Access-Request') && ("%{client:add_cui}" == 'yes')) {
+		update proxy-request {
+			Chargeable-User-Identity = '\\000'
+		}
+	}
+}
+
+
+#
+#  Add a CUI attribute based on the User-Name, and a secret key
+#  known only to this server.
+#  For the EAP-TTLS and EAP-PEAP methods the
+#  use_tunneled_reply parameter MUST be set to yes.
+#
+cui.post-auth {
+	if (!control:Proxy-To-Realm && Chargeable-User-Identity && !reply:Chargeable-User-Identity && \
+	    (Operator-Name || ('${policy.cui_require_operator_name}' != 'yes')) ) {
+		update reply {
+			Chargeable-User-Identity = "%{sha1:${policy.cui_hash_key}%{tolower:%{User-Name}%{%{Operator-Name}:-}}}"
+		}
+	}
+
+	update reply {
+		User-Name !* ANY	# remove User-Name from the reply for security
+	}
+
+	#
+	#  The section below will store a CUI for the User in the DB.
+	#  You need to configure the cuisql module and your database for this to work.
+	#  If your NAS can do CUI-based accounting itself or you do not care about
+	#  accounting, comment out the three lines below.
+	#
+	if (reply:Chargeable-User-Identity) {
+		cuisql
+	}
+}
+
+
+cui-inner.post-auth {
+	if (outer.request:Chargeable-User-Identity && \
+	    (outer.request:Operator-Name || ('${policy.cui_require_operator_name}' != 'yes'))) {
+		update reply {
+			Chargeable-User-Identity := "%{sha1:${policy.cui_hash_key}%{tolower:%{User-Name}%{%{outer.request:Operator-Name}:-}}}"
+		}
+	}
+}
+
+#
+#  If your NAS can do CUI-based accounting, or you do not care about
+#  accounting, then just comment out the call to cui in ......
+#
+#  If we had stored a CUI for the User, add it to the request.
+#
+cui.accounting {
+	#
+	#  If the CUI isn't in the packet, see if we can find it
+	#  in the DB.
+	#
+	if (!Chargeable-User-Identity) {
+		update request {
+			Chargeable-User-Identity := "%{cuisql:\
+				SELECT cui FROM cui \
+				WHERE clientipaddress = '%{%{Packet-Src-IPv6-Address}:-%{Packet-Src-IP-Address}}' \
+				AND callingstationid = '%{Calling-Station-Id}' \
+				AND username = '%{User-Name}'}"
+		}
+	}
+
+	#
+	#  If it exists now, then write out when we last saw
+	#  this CUI.
+	#
+	if (Chargeable-User-Identity && (Chargeable-User-Identity != '')) {
+		cuisql
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/policy.d/dhcp b/src/test/setup/radius-config/freeradius/policy.d/dhcp
new file mode 100644
index 0000000..4396f06
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/policy.d/dhcp
@@ -0,0 +1,25 @@
+#  Assign compatibility data to request for sqlippool
+dhcp_sqlippool.post-auth {
+
+
+	#  Do some minor hacks to the request so that it looks
+	#  like a RADIUS request to the SQL IP Pool module.
+	update request {
+		User-Name = "DHCP-%{DHCP-Client-Hardware-Address}"
+		Calling-Station-Id = "%{DHCP-Client-Hardware-Address}"
+		NAS-IP-Address = "%{%{DHCP-Gateway-IP-Address}:-127.0.0.1}"
+		Acct-Status-Type = Start
+	}
+
+	#  Call the actual module
+	dhcp_sqlippool
+
+	#  Convert Framed-IP-Address to DHCP, but only if we
+	#  actually allocated an address.
+	if (ok) {
+		update reply {
+			DHCP-Your-IP-Address = "%{reply:Framed-IP-Address}"
+		}
+	}
+}
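+
+#  A worked example of the mapping above (illustrative values only):
+#  a request from hardware address 00:11:22:33:44:55 with no gateway
+#  address is presented to the SQL IP Pool module as
+#
+#    User-Name          = "DHCP-00:11:22:33:44:55"
+#    Calling-Station-Id = "00:11:22:33:44:55"
+#    NAS-IP-Address     = 127.0.0.1
+#    Acct-Status-Type   = Start
+#
+#  and, if an address was allocated, the reply's Framed-IP-Address is
+#  copied into DHCP-Your-IP-Address.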
+
diff --git a/src/test/setup/radius-config/freeradius/policy.d/eap b/src/test/setup/radius-config/freeradius/policy.d/eap
new file mode 100644
index 0000000..46d6945
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/policy.d/eap
@@ -0,0 +1,86 @@
+#
+#	Response caching to handle proxy failovers
+#
+Xeap.authorize {
+	cache_eap
+	if (ok) {
+		#
+		#	Expire previous cache entry
+		#
+		if (control:State) {
+			update control {
+				Cache-TTL := 0
+			}
+			cache_eap
+
+			update control {
+				Cache-TTL !* ANY
+				State !* ANY
+			}
+		}
+
+		handled
+	}
+	else {
+		eap.authorize
+	}
+}
+
+#
+#	Populate cache with responses from the EAP module
+#
+Xeap.authenticate {
+	eap {
+		handled = 1
+	}
+	if (handled) {
+		cache_eap.authorize
+
+		handled
+	}
+
+	cache_eap.authorize
+}
+
+#
+#       Forbid all EAP types.  Enable this by putting "forbid_eap"
+#       into the "authorize" section.
+#
+forbid_eap {
+	if (EAP-Message) {
+		reject
+	}
+}
+
+#
+#       Forbid all non-EAP types outside of an EAP tunnel.
+#
+permit_only_eap {
+	if (!EAP-Message) {
+		#  We MAY be inside of a TTLS tunnel.
+		#  PEAP and EAP-FAST require EAP inside of
+		#  the tunnel, so this check is OK.
+		#  If so, then there MUST be an outer EAP message.
+		if (outer.request && outer.request:EAP-Message) {
+			reject
+		}
+	}
+}
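+
+#
+#  An illustrative sketch of enabling the two policies above from an
+#  authorize section (other entries elided):
+#
+#  authorize {
+#    ...
+#    permit_only_eap
+#  }
+#
+#  or, to forbid EAP entirely on a given virtual server:
+#
+#  authorize {
+#    forbid_eap
+#    ...
+#  }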
+
+#
+#       Remove Reply-Message from the response if we're doing EAP
+#
+#  Be RFC 3579 2.6.5 compliant - EAP-Message and Reply-Message should
+#  not be present in the same response.
+#
+remove_reply_message_if_eap {
+	if(reply:EAP-Message && reply:Reply-Message) {
+		update reply {
+			Reply-Message !* ANY
+		}
+	}
+	else {
+		noop
+	}
+}
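+
+#
+#  An illustrative sketch (other entries elided): call the policy above
+#  near the end of a post-auth section, after the reply attributes have
+#  been assembled:
+#
+#  post-auth {
+#    ...
+#    remove_reply_message_if_eap
+#  }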
+
diff --git a/src/test/setup/radius-config/freeradius/policy.d/filter b/src/test/setup/radius-config/freeradius/policy.d/filter
new file mode 100644
index 0000000..c881b3d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/policy.d/filter
@@ -0,0 +1,93 @@
+#
+#	Example of forbidding all attempts to login via
+#	realms.
+#
+deny_realms {
+	if (User-Name =~ /@|\\/) {
+		reject
+	}
+}
+
+#
+#	Filter the username
+#
+#  Force some sanity on User-Name. This helps to avoid issues
+#  where the back-end database is "forgiving" about
+#  what constitutes a user name.
+#
+filter_username {
+	#
+	#  reject mixed case
+	#  e.g. "UseRNaMe"
+	#
+	if (User-Name != "%{tolower:%{User-Name}}") {
+		reject
+	}
+
+	#
+	#  reject all whitespace
+	#  e.g. "user@ site.com", or "us er", or " user", or "user "
+	#
+	if (User-Name =~ / /) {
+		update reply {
+			Reply-Message += "Rejected: Username contains whitespace"
+		}
+		reject
+	}
+
+	#
+	#  reject Multiple @'s
+	#  e.g. "user@site.com@site.com"
+	#
+	if(User-Name =~ /@.*@/ ) {
+		update reply {
+			Reply-Message += "Rejected: Multiple @ in username"
+		}
+		reject
+	}
+
+	#
+	#  reject double dots
+	#  e.g. "user@site..com"
+	#
+	if (User-Name =~ /\\.\\./ ) {
+		update reply {
+			Reply-Message += "Rejected: Username contains ..s"
+		}
+		reject
+	}
+
+	#
+	#  must have at least 1 string-dot-string after @
+	#  e.g. "user@site.com"
+	#
+	if ((User-Name =~ /@/) && (User-Name !~ /@(.+)\\.(.+)$/))  {
+		update reply {
+			Reply-Message += "Rejected: Realm does not have at least one dot separator"
+		}
+		reject
+	}
+
+	#
+	#  Realm ends with a dot
+	#  e.g. "user@site.com."
+	#
+	if (User-Name =~ /\\.$/)  {
+		update reply {
+			Reply-Message += "Rejected: Realm ends with a dot"
+		}
+		reject
+	}
+
+	#
+	#  Realm begins with a dot
+	#  e.g. "user@.site.com"
+	#
+	if (User-Name =~ /@\\./)  {
+		update reply {
+			Reply-Message += "Rejected: Realm begins with a dot"
+		}
+		reject
+	}
+}
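+
+#
+#  An illustrative sketch (other entries elided): enable these checks by
+#  listing the policy early in an authorize section, before any module
+#  that looks the user up:
+#
+#  authorize {
+#    filter_username
+#    ...
+#  }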
+
diff --git a/src/test/setup/radius-config/freeradius/policy.d/operator-name b/src/test/setup/radius-config/freeradius/policy.d/operator-name
new file mode 100644
index 0000000..a16fa1e
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/policy.d/operator-name
@@ -0,0 +1,46 @@
+#
+#  The following policies are for the Operator-Name
+#  configuration.
+#
+#  The policies below can be called as just 'operator-name' (not
+#  operator-name.authorize etc..)  from the various config sections.
+#
+
+#  If you require that the Operator-Name be set
+#  for local clients then call the 'operator-name' policy
+#  in the authorize section of the virtual-server for your clients in clients.conf
+
+#  To inject an Operator-Name whilst proxying, call the
+#  'operator-name' policy in the pre-proxy section of the virtual server
+#  No need to call this if you have already enabled this in
+#  the authorize section.
+
+#
+#  We assume that clients can have the operator-name definition
+#  in the client.conf, e.g.
+#  client xxxx {
+#    ...
+#    Operator-Name = 1your.domain
+#  }
+#  If this parameter is found for a client, then we add
+#  an Operator-Name attribute
+#
+operator-name.authorize {
+	if ("%{client:Operator-Name}") {
+		update request {
+			Operator-Name = "%{client:Operator-Name}"
+		}
+	}
+}
+
+#
+# Before proxying the request, add an Operator-Name
+# attribute identifying this site, if an operator-name is found for this client.
+#
+operator-name.pre-proxy {
+	if (("%{request:Packet-Type}" == 'Access-Request') && "%{client:Operator-Name}") {
+		update proxy-request {
+			Operator-Name := "%{client:Operator-Name}"
+		}
+	}
+}
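+
+#
+#  An illustrative sketch (other entries elided): with a client marked
+#  as described above, reference the policy from the relevant sections:
+#
+#  authorize {
+#    ...
+#    operator-name
+#  }
+#  pre-proxy {
+#    operator-name
+#    ...
+#  }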
diff --git a/src/test/setup/radius-config/freeradius/proxy.conf b/src/test/setup/radius-config/freeradius/proxy.conf
new file mode 100644
index 0000000..0f61067
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/proxy.conf
@@ -0,0 +1,804 @@
+# -*- text -*-
+##
+## proxy.conf -- proxy radius and realm configuration directives
+##
+##	$Id: ae8fedf199ad3ec6197dee75db11769aafa88d07 $
+
+#######################################################################
+#
+#  Proxy server configuration
+#
+#  This entry controls the server's behaviour towards ALL other servers
+#  to which it sends proxy requests.
+#
+proxy server {
+	#
+	#  Note that as of 2.0, the "synchronous", "retry_delay",
+	#  "retry_count", and "dead_time" have all been deprecated.
+#  For backwards compatibility, they are still accepted
+	#  by the server, but they ONLY apply to the old-style realm
+	#  configuration.  i.e. realms with "authhost" and/or "accthost"
+	#  entries.
+	#
+	#  i.e. "retry_delay" and "retry_count" have been replaced
+	#  with per-home-server configuration.  See the "home_server"
+	#  example below for details.
+	#
+	#  i.e. "dead_time" has been replaced with a per-home-server
+	#  "revive_interval".  We strongly recommend that this not
+	#  be used, however.  The new method is much better.
+
+	#
+	#  In 2.0, the server is always "synchronous", and setting
+	#  "synchronous = no" is impossible.  This simplifies the
+	#  server and increases the stability of the network.
+	#  However, it means that the server (i.e. proxy) NEVER
+	#  originates packets.  It proxies packets ONLY when it receives
+	#  a packet or a re-transmission from the NAS.  If the NAS never
+	#  re-transmits, the proxy never re-transmits, either.  This can
+	#  affect fail-over, where a packet does *not* fail over to a
+	#  second home server, because the NAS never retransmits the
+	#  packet.
+	#
+	#  If you need to set "synchronous = no", please send a
+	#  message to the list <freeradius-users@lists.freeradius.org>
+	#  explaining why this feature is vital for your network.
+
+	#
+	#  If a realm exists, but there are no live home servers for
+	#  it, we can fall back to using the "DEFAULT" realm.  This is
+	#  most useful for accounting, where the server can proxy
+	#  accounting requests to home servers, but if they're down,
+	#  use a DEFAULT realm that is LOCAL (i.e. accthost = LOCAL),
+	#  and then store the packets in the "detail" file.  That data
+	#  can be later proxied to the home servers by radrelay, when
+	#  those home servers come back up again.
+
+	#  Setting this to "yes" may cause issues for authentication,
+	#  e.g. if you are proxying for two different ISPs, and also
+	#  act as a general dial-up for GRIC.  If one of the first two
+	#  ISPs has its RADIUS server go down, you do NOT want to
+	#  proxy those requests to GRIC.  Instead, you probably want
+	#  to just drop the requests on the floor.  In that case, set
+	#  this value to 'no'.
+	#
+	#  allowed values: {yes, no}
+	#
+	default_fallback = no
+
+}
+
+#######################################################################
+#
+#  Configuration for the proxy realms.
+#
+#  As of 2.0, the old-style "realms" file is deprecated, and is not
+#  used by FreeRADIUS.
+#
+#  As of 2.0, the "realm" configuration has changed.  Instead of
+#  specifying "authhost" and "accthost" in a realm section, the home
+#  servers are specified separately in a "home_server" section.  For
+#  backwards compatibility, you can still use the "authhost" and
+#  "accthost" directives.  If you only have one home server for a
+#  realm, it is easier to use the old-style configuration.
+#
+#  However, if you have multiple servers for a realm, we STRONGLY
+#  suggest moving to the new-style configuration.
+#
+#
+#  Load-balancing and failover between home servers is handled via
+#  a "home_server_pool" section.
+#
+#  Finally, the "realm" section defines the realm, some options, and
+#  indicates which server pool should be used for the realm.
+#
+#  This change means that simple configurations now require multiple
+#  sections to define a realm.  However, complex configurations
+#  are much simpler than before, as multiple realms can share the same
+#  server pool.
+#
+#  That is, realms point to server pools, and server pools point to
+#  home servers.  Multiple realms can point to one server pool.  One
+#  server pool can point to multiple home servers.  Each home server
+#  can appear in one or more pools.
+#
+
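+#  A minimal illustrative chain (the names, address and secret below are
+#  examples only; each section type is documented in detail in the rest
+#  of this file):
+#
+#  home_server auth1.example.org {
+#	type = auth
+#	ipaddr = 192.0.2.10
+#	port = 1812
+#	secret = examplesecret
+#  }
+#
+#  home_server_pool example_org_auth_pool {
+#	type = fail-over
+#	home_server = auth1.example.org
+#  }
+#
+#  realm example.org {
+#	auth_pool = example_org_auth_pool
+#  }
+#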
+######################################################################
+#
+#  This section defines a "Home Server" which is another RADIUS
+#  server that gets sent proxied requests.  In earlier versions
+#  of FreeRADIUS, home servers were defined in "realm" sections,
+#  which was awkward.  In 2.0, they have been made independent
+#  from realms, which is better for a number of reasons.
+#
+home_server localhost {
+	#
+	#  Home servers can be sent Access-Request packets
+	#  or Accounting-Request packets.
+	#
+	#  Allowed values are:
+	#	auth	  - Handles Access-Request packets
+	#	acct	  - Handles Accounting-Request packets
+	#	auth+acct - Handles Access-Request packets at "port",
+	#		    and Accounting-Request packets at "port + 1"
+	#	coa	  - Handles CoA-Request and Disconnect-Request packets.
+	#		    See also raddb/sites-available/originate-coa
+	type = auth
+
+	#
+	#  Configure ONE OF the following entries:
+	#
+	#	IPv4 address
+	#
+	ipaddr = 127.0.0.1
+
+	#	OR IPv6 address
+	# ipv6addr = ::1
+
+	#	OR virtual server
+	# virtual_server = foo
+
+	#	Note that while both ipaddr and ipv6addr will accept
+	#	both addresses and host names, we do NOT recommend
+	#	using host names.  When you specify a host name, the
+	#	server has to do a DNS lookup to find the IP address
+	#	of the home server.  If the DNS server is slow or
+	#	unresponsive, it means that FreeRADIUS will NOT be
+	#	able to determine the address, and will therefore NOT
+	#	start.
+	#
+	#	Also, the mapping of host name to address is done ONCE
+	#	when the server starts.  If DNS is later updated to
+	#	change the address, FreeRADIUS will NOT discover that
+	#	until after a re-start, or a HUP.
+	#
+	#	If you specify a virtual_server here, then requests
+	#	will be proxied internally to that virtual server.
+	#	These requests CANNOT be proxied again, however.  The
+	#	intent is to have the local server handle packets
+	#	when all home servers are dead.
+	#
+	#	Requests proxied to a virtual server will be passed
+	#	through the pre-proxy and post-proxy sections, just
+	#	like any other request.  See also the sample "realm"
+	#	configuration, below.
+	#
+	#	None of the rest of the home_server configuration is used
+	#	for the "virtual_server" configuration.
+
+	#
+	#  The port to which packets are sent.
+	#
+	#  Usually 1812 for type "auth", and  1813 for type "acct".
+	#  Older servers may use 1645 and 1646.
+	#  Use 3799 for type "coa"
+	#
+	port = 1812
+
+	#
+	#  The transport protocol.
+	#
+	#  If unspecified, defaults to "udp", which is the traditional
+	#  RADIUS transport.  It may also be "tcp", in which case TCP
+	#  will be used to talk to this home server.
+	#
+	#  When home servers are put into pools, the pool can contain
+	#  home servers with both UDP and TCP transports.
+	#
+	#proto = udp
+
+	#
+	#  The shared secret used to "encrypt" and "sign" packets between
+	#  FreeRADIUS and the home server.
+	#
+	#  The secret can be any string, up to 8k characters in length.
+	#
+	#  Control codes can be entered via octal encoding,
+	#	e.g. "\101\102" == "AB"
+	#  Quotation marks can be entered by escaping them,
+	#	e.g. "foo\"bar"
+	#  Spaces or other "special" characters can be entered
+	#  by putting quotes around the string.
+	#	e.g. "foo bar"
+	#	     "foo;bar"
+	#
+	secret = testing123
+
+	############################################################
+	#
+	#  The rest of the configuration items listed here are optional,
+	#  and do not have to appear in every home server definition.
+	#
+	############################################################
+
+	#
+	#  You can optionally specify the source IP address used when
+	#  proxying requests to this home server.  When the src_ipaddr
+	#  is set, the server will automatically create a proxy
+	#  listener for that IP address.
+	#
+	#  If you specify this field for one home server, you will
+	#  likely need to specify it for ALL home servers.
+	#
+	#  If you don't care about the source IP address, leave this
+	#  entry commented.
+	#
+#	src_ipaddr = 127.0.0.1
+
+	#
+	#  If the home server does not respond to a request within
+	#  this time, this server will initiate "zombie_period".
+	#
+	#  The response window is large because responses MAY be slow,
+	#  especially when proxying across the Internet.
+	#
+	#  Useful range of values: 5 to 60
+	response_window = 20
+
+	#
+	#  If you want the old behaviour of the server rejecting
+	#  proxied requests after "response_window" timeout, set
+	#  the following configuration item to "yes".
+	#
+	#  This configuration WILL be removed in a future release
+	#  If you believe you need it, email the freeradius-users
+	#  list, and explain why it should stay in the server.
+	#
+#	no_response_fail = no
+
+	#
+	#  If the home server does not respond to ANY packets during
+	#  the "zombie period", it will be considered to be dead.
+	#
+	#  A home server that is marked "zombie" will be used for
+	#  proxying as a low priority.  If there are live servers,
+	#  they will always be preferred to a zombie.  Requests will
+	#  be proxied to a zombie server ONLY when there are no
+	#  live servers.
+	#
+	#  Any request that is proxied to a home server will continue
+	#  to be sent to that home server until the home server is
+	#  marked dead.  At that point, it will fail over to another
+	#  server, if a live server is available.  If none is available,
+	#  then the "post-proxy-type fail" handler will be called.
+	#
+	#  If "status_check" below is something other than "none", then
+	#  the server will start sending status checks at the start of
+	#  the zombie period.  It will continue sending status checks
+	#  until the home server is marked "alive".
+	#
+	#  Useful range of values: 20 to 120
+	zombie_period = 40
+
+	############################################################
+	#
+	#  As of 2.0, FreeRADIUS supports RADIUS layer "status
+	#  checks".  These are used by a proxy server to see if a home
+	#  server is alive.
+	#
+	#  These status packets are sent ONLY if the proxying server
+	#  believes that the home server is dead.  They are NOT sent
+	#  if the proxying server believes that the home server is
+	#  alive.  They are NOT sent if the proxying server is not
+	#  proxying packets.
+	#
+	#  If the home server responds to the status check packet,
+	#  then it is marked alive again, and is returned to use.
+	#
+	############################################################
+
+	#
+	#  Some home servers do not support status checks via the
+	#  Status-Server packet.  Others may not have a "test" user
+	#  configured that can be used to query the server, to see if
+	#  it is alive.  For those servers, we have NO WAY of knowing
+	#  when it becomes alive again.  Therefore, after the server
+	#  has been marked dead, we wait a period of time, and mark
+	#  it alive again, in the hope that it has come back to
+	#  life.
+	#
+	#  If it has NOT come back to life, then FreeRADIUS will wait
+	#  for "zombie_period" before marking it dead again.  During
+	#  the "zombie_period", ALL AUTHENTICATIONS WILL FAIL, because
+	#  the home server is still dead.  There is NOTHING that can
+	#  be done about this, other than to enable the status checks,
+	#  as documented below.
+	#
+	#  e.g. if "zombie_period" is 40 seconds, and "revive_interval"
+	#  is 300 seconds, then for 40 seconds out of every 340, or about
+	#  10% of the time, all authentications will fail.
+	#
+	#  If the "zombie_period" and "revive_interval" configurations
+	#  are set smaller, then it is possible for up to 50% of
+	#  authentications to fail.
+	#
+	#  As a result, we recommend enabling status checks, and
+	#  we do NOT recommend using "revive_interval".
+	#
+	#  The "revive_interval" is used ONLY if the "status_check"
+	#  entry below is "none".  Otherwise, it will not be used,
+	#  and should be deleted.
+	#
+	#  Useful range of values: 60 to 3600
+	revive_interval = 120
+
+	#
+	#  The proxying server (i.e. this one) can do periodic status
+	#  checks to see if a dead home server has come back alive.
+	#
+	#  If set to "none", then the other configuration items listed
+	#  below are not used, and the "revive_interval" time is used
+	#  instead.
+	#
+	#  If set to "status-server", the Status-Server packets are
+	#  sent.  Many RADIUS servers support Status-Server.  If a
+	#  server does not support it, please contact the server
+	#  vendor and request that they add it.
+	#
+	#  If set to "request", then Access-Request, or Accounting-Request
+	#  packets are sent, depending on the "type" entry above (auth/acct).
+	#
+	#  Allowed values: none, status-server, request
+	status_check = status-server
+
+	#
+	#  If the home server does not support Status-Server packets,
+	#  then the server can still send Access-Request or
+	#  Accounting-Request packets, with a pre-defined user name.
+	#
+	#  This practice is NOT recommended, as it may potentially let
+	#  users gain network access by using these "test" accounts!
+	#
+	#  If it is used, we recommend that the home server ALWAYS
+	#  respond to these Access-Request status checks with
+	#  Access-Reject.  The status check just needs an answer, it
+	#  does not need an Access-Accept.
+	#
+	#  For Accounting-Request status checks, only the username
+	#  needs to be set.  The rest of the accounting attributes are
+	#  set to default values.  The home server that receives these
+	#  accounting packets SHOULD NOT treat them like normal user
+	#  accounting packets.  i.e. it should probably NOT log them to
+	#  a database.
+	#
+	# username = "test_user_please_reject_me"
+	# password = "this is really secret"
+
+	#
+	#  Configure the interval between sending status check packets.
+	#
+	#  Setting it too low increases the probability of spurious
+	#  fail-over and fallback attempts.
+	#
+	#  Useful range of values: 6 to 120
+	check_interval = 30
+
+	#
+	#  Configure the number of status checks in a row that the
+	#  home server needs to respond to before it is marked alive.
+	#
+	#  If you want to mark a home server as alive after a short
+	#  time period of being responsive, it is best to use a small
+	#  "check_interval", and a large value for
+	#  "num_answers_to_alive".  Using a long "check_interval" and
+	#  a small number for "num_answers_to_alive" increases the
+	#  probability of spurious fail-over and fallback attempts.
+	#
+	#  Useful range of values: 3 to 10
+	num_answers_to_alive = 3
+
+	#
+	#  Limit the total number of outstanding packets to the home
+	#  server.
+	#
+	#  if ((#requests sent) - (#requests received)) > max_outstanding
+	#	then stop sending more packets to the home server
+	#
+	#  This lets us gracefully fail over when the home server
+	#  is overloaded.
+	max_outstanding = 65536
+
+	#
+	#  The configuration items in the next sub-section are used ONLY
+	#  when "type = coa".  It is ignored for all other type of home
+	#  servers.
+	#
+	#  See RFC 5080 for the definitions of the following terms.
+	#  RAND is a function (internal to FreeRADIUS) returning
+	#  random numbers between -0.1 and +0.1
+	#
+	#  First Re-transmit occurs after:
+	#
+	#	 RT = IRT + RAND*IRT
+	#
+	#  Subsequent Re-transmits occur after:
+	#
+	#	RT = 2 * RTprev + RAND * RTprev
+	#
+	#  Re-transmits are capped at:
+	#
+	#	if (MRT && (RT > MRT)) RT = MRT + RAND * MRT
+	#
+	#  For a maximum number of attempts: MRC
+	#
+	#  For a maximum (total) period of time: MRD.
+	#
+	coa {
+		# Initial retransmit interval: 1..5
+		irt = 2
+
+		# Maximum Retransmit Timeout: 1..30 (0 == no maximum)
+		mrt = 16
+
+		# Maximum Retransmit Count: 1..20 (0 == retransmit forever)
+		mrc = 5
+
+		# Maximum Retransmit Duration: 5..60
+		mrd = 30
+	}
+
+	#
+	#  Connection limiting for home servers with "proto = tcp".
+	#
+	#  This section is ignored for other home servers.
+	#
+	limit {
+	      #
+	      #  Limit the number of TCP connections to the home server.
+	      #
+	      #  The default is 16.
+	      #  Setting this to 0 means "no limit"
+	      max_connections = 16
+
+	      #
+	      #  Limit the total number of requests sent over one
+	      #  TCP connection.  After this number of requests, the
+	      #  connection will be closed.  Any new packets that are
+	      #  proxied to the home server will result in a new TCP
+	      #  connection being made.
+	      #
+	      #  Setting this to 0 means "no limit"
+	      max_requests = 0
+
+	      #
+	      #  The lifetime, in seconds, of a TCP connection.  After
+	      #  this lifetime, the connection will be closed.
+	      #
+	      #  Setting this to 0 means "forever".
+	      lifetime = 0
+
+	      #
+	      #  The idle timeout, in seconds, of a TCP connection.
+	      #  If no packets have been sent over the connection for
+	      #  this time, the connection will be closed.
+	      #
+	      #  Setting this to 0 means "no timeout".
+	      idle_timeout = 0
+	}
+
+}
+
+# Sample virtual home server.
+#
+#
+#home_server virtual.example.com {
+#	    virtual_server = virtual.example.com
+#}
+
+######################################################################
+#
+#  This section defines a pool of home servers that is used
+#  for fail-over and load-balancing.  In earlier versions of
+#  FreeRADIUS, fail-over and load-balancing were defined per-realm.
+#  As a result, if a server had 5 home servers, each of which served
+#  the same 10 realms, you would need 50 "realm" entries.
+#
+#  In version 2.0, you would need 5 "home_server" sections,
+#  10 'realm" sections, and one "home_server_pool" section to tie the
+#  two together.
+#
+home_server_pool my_auth_failover {
+	#
+	#  The type of this pool controls how home servers are chosen.
+	#
+	#  fail-over - the request is sent to the first live
+	#  	home server in the list.  i.e. If the first home server
+	#	is marked "dead", the second one is chosen, etc.
+	#
+	#  load-balance - the least busy home server is chosen,
+	#	where "least busy" is counted by taking the number of
+	#	requests sent to that home server, and subtracting the
+	#	number of responses received from that home server.
+	#
+	#	If there are two or more servers with the same low
+	#	load, then one of those servers is chosen at random.
+	#	This configuration is most similar to the old
+	#	"round-robin" method, though it is not exactly the same.
+	#
+	#	Note that load balancing does not work well with EAP,
+	#	as EAP requires packets for an EAP conversation to be
+	#	sent to the same home server.  The load balancing method
+	#	does not keep state in between packets, meaning that
+	#	EAP packets for the same conversation may be sent to
+	#	different home servers.  This will prevent EAP from
+	#	working.
+	#
+	#	For non-EAP authentication methods, and for accounting
+	#	packets, we recommend using "load-balance".  It will
+	#	ensure the highest availability for your network.
+	#
+	#  client-balance - the home server is chosen by hashing the
+	#	source IP address of the packet.  If that home server
+	#	is down, the next one in the list is used, just as
+	#	with "fail-over".
+	#
+	#	There is no way of predicting which source IP will map
+	#	to which home server.
+	#
+	#	This configuration is most useful to do simple load
+	#	balancing for EAP sessions, as the EAP session will
+	#	always be sent to the same home server.
+	#
+	#  client-port-balance - the home server is chosen by hashing
+	#	the source IP address and source port of the packet.
+	#	If that home server is down, the next one in the list
+	#	is used, just as with "fail-over".
+	#
+	#	This method provides slightly better load balancing
+	#	for EAP sessions than "client-balance".  However, it
+	#	also means that authentication and accounting packets
+	#	for the same session MAY go to different home servers.
+	#
+	#  keyed-balance - the home server is chosen by hashing (FNV)
+	#	the contents of the Load-Balance-Key attribute from the
+	#	control items.  The request is then sent to the home server
+	#	chosen by taking:
+	#
+	#		server = (hash % num_servers_in_pool).
+	#
+	#	If there is no Load-Balance-Key in the control items,
+	#	the load balancing method is identical to "load-balance".
+	#
+	#	For most non-EAP authentication methods, the User-Name
+	#	attribute provides a good key.  An "unlang" policy can
+	#	be used to copy the User-Name to the Load-Balance-Key
+	#	attribute; a commented sketch follows the "type" entry
+	#	below.  This method may not work for EAP sessions,
+	#	as the User-Name outside of the TLS tunnel is often
+	#	static, e.g. "anonymous@realm".
+	#
+	#
+	#  The default type is fail-over.
+	type = fail-over
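+
+	#
+	#  An illustrative unlang sketch for "keyed-balance" (example only,
+	#  for the authorize section of a virtual server): copy the
+	#  User-Name into the Load-Balance-Key control item.
+	#
+	#	update control {
+	#		Load-Balance-Key := "%{User-Name}"
+	#	}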
+
+	#
+	#  A virtual_server may be specified here.  If so, the
+	#  "pre-proxy" and "post-proxy" sections are called when
+	#  the request is proxied, and when a response is received.
+	#
+	#  This lets you have one policy for all requests that are proxied
+	#  to a home server.  This policy is completely independent of
+	#  any policies used to receive, or process the request.
+	#
+	#virtual_server = pre_post_proxy_for_pool
+
+	#
+	#  Next, a list of one or more home servers.  The names
+	#  of the home servers are NOT the hostnames, but the names
+	#  of the sections.  (e.g. home_server foo {...} has name "foo").
+	#
+	#  Note that ALL home servers listed here have to be of the same
+	#  type.  i.e. they all have to be "auth", or they all have to
+	#  be "acct", or the all have to be "auth+acct".
+	#
+	home_server = localhost
+
+	#  Additional home servers can be listed.
+	#  There is NO LIMIT to the number of home servers that can
+	#  be listed, though using more than 10 or so will become
+	#  difficult to manage.
+	#
+	# home_server = foo.example.com
+	# home_server = bar.example.com
+	# home_server = baz.example.com
+	# home_server = ...
+
+
+	#
+	#  If ALL home servers are dead, then this "fallback" home server
+	#  is used.  If set, it takes precedence over any realm-based
+	#  fallback, such as the DEFAULT realm.
+	#
+	#  For reasons of stability, this home server SHOULD be a virtual
+	#  server.  Otherwise, the fallback may itself be dead!
+	#
+	#fallback = virtual.example.com
+}
+
+######################################################################
+#
+#
+#  This section defines a new-style "realm".  Note that in version 2.0,
+#  there are many fewer configuration items than in 1.x for a realm.
+#
+#  Automatic proxying is done via the "realms" module (see "man
+#  rlm_realm").  To manually proxy the request put this entry in the
+#  "users" file:
+
+#
+#
+#DEFAULT	Proxy-To-Realm := "realm_name"
+#
+#
+realm example.com {
+	#
+	#  Realms point to pools of home servers.
+	#
+	#  For authentication, the "auth_pool" configuration item
+	#  should point to a "home_server_pool" that was previously
+	#  defined.  All of the home servers in the "auth_pool" must
+	#  be of type "auth".
+	#
+	#  For accounting, the "acct_pool" configuration item
+	#  should point to a "home_server_pool" that was previously
+	#  defined.  All of the home servers in the "acct_pool" must
+	#  be of type "acct".
+	#
+	#  If you have a "home_server_pool" where all of the home servers
+	#  are of type "auth+acct", you can just use the "pool"
+	#  configuration item, instead of specifying both "auth_pool"
+	#  and "acct_pool".
+
+	auth_pool = my_auth_failover
+#	acct_pool = acct
+
+	#  As of Version 3.0, the server can proxy CoA packets
+	#  based on the Operator-Name attribute.  This requires
+	#  that the "suffix" module be listed in the "recv-coa"
+	#  section.
+	#
+	#  See raddb/sites-available/coa
+	#
+#	coa_pool = name_of_coa_pool
+
+	#
+	#  Normally, when an incoming User-Name is matched against the
+	#  realm, the realm name is "stripped" off, and the "stripped"
+	#  user name is used to perform matches.
+	#
+	#  e.g. User-Name = "bob@example.com" will result in two new
+	#  attributes being created by the "realms" module:
+	#
+	#	Stripped-User-Name = "bob"
+	#	Realm = "example.com"
+	#
+	#  The Stripped-User-Name is then used as a key in the "users"
+	#  file, for example.
+	#
+	#  If you do not want this to happen, uncomment "nostrip" below.
+	#
+	# nostrip
+
+	#  There are no more configuration entries for a realm.
+}
+
+
+#
+#  This is a sample entry for iPass.
+#  Note that you have to define "ipass_auth_pool" and
+#  "ipass_acct_pool", along with home_servers for them, too.
+#
+#realm IPASS {
+#	nostrip
+#
+#	auth_pool = ipass_auth_pool
+#	acct_pool = ipass_acct_pool
+#}
+
+#
+#  This realm is used mainly to cancel proxying.  You can have
+#  the "realm suffix" module configured to proxy all requests for
+#  a realm, and then later cancel the proxying, based on other
+#  configuration.
+#
+#  For example, you want to terminate PEAP or EAP-TTLS locally,
+#  you can add the following to the "users" file:
+#
+#  DEFAULT EAP-Type == PEAP, Proxy-To-Realm := LOCAL
+#
+realm LOCAL {
+	#  If we do not specify a server pool, the realm is LOCAL, and
+	#  requests are not proxied to it.
+}
+
+#
+#  This realm is for requests which don't have an explicit realm
+#  prefix or suffix.  User names like "bob" will match this one.
+#
+#realm NULL {
+#	authhost	= radius.company.com:1600
+#	accthost	= radius.company.com:1601
+#	secret		= testing123
+#}
+
+#
+#  This realm is for ALL OTHER requests.
+#
+#realm DEFAULT {
+#	authhost	= radius.company.com:1600
+#	accthost	= radius.company.com:1601
+#	secret		= testing123
+#}
+
+
+#  This realm "proxies" requests internally to a virtual server.
+#  The pre-proxy and post-proxy sections are run just as with any
+#  other kind of home server.  The virtual server then receives
+#  the request, and replies, just as with any other packet.
+#
+#  Once proxied internally like this, the request CANNOT be proxied
+#  internally or externally.
+#
+#realm virtual.example.com {
+#	virtual_server = virtual.example.com
+#}
+#
+
+#
+#  Regular expressions may also be used as realm names.  If these are used,
+#  then the "find matching realm" process is as follows:
+#
+#    1) Look for a non-regex realm with an *exact* match for the name.
+#       If found, it is used in preference to any regex matching realm.
+#
+#    2) Look for a regex realm, in the order that they are listed
+#       in the configuration files.  Any regex match is performed in
+#	a case-insensitive fashion.
+#
+#    3) If no realm is found, return the DEFAULT realm, if any.
+#
+#  The order of the realms matters in step (2).  For example, defining
+#  two realms ".*\.example.net$" and ".*\.test\.example\.net$" will result in
+#  the second realm NEVER matching.  This is because all of the realms
+#  which match the second regex also match the first one.  Since the
+#  first regex matches, it is returned.
+#
+#  The solution is to list the realms in the opposite order, e.g.
+#  ".*\.test\.example.net$", followed by ".*\.example\.net$".
+#
+#
+#  Some helpful rules:
+#
+#   - always place a '~' character at the start of the realm name.
+#     This signifies that it is a regex match, and not an exact match
+#     for the realm.
+#
+#   - place the regex in double quotes.  This helps the configuration
+#     file parser ignore any "special" characters in the regex.
+#     Yes, this rule is different than the normal "unlang" rules for
+#     regular expressions.  That may be fixed in a future release.
+#
+#   - use two back-slashes '\\' whenever you need one backslash in the
+#     regex.  e.g. "~.*\\.example\\.net$", and not "~\.example\.net$".
+#     This is because the regex is in a double-quoted string, and normal
+#     rules apply for double-quoted strings.
+#
+#   - If you are matching domain names, use two backslashes in front of
+#     every '.' (dot or period).  This is because '.' has special meaning
+#     in a regular expression: match any character.  If you do not do this,
+#     then "~.*.example.net$" will match "fooXexampleYnet", which is likely
+#     not what you want.
+#
+#   - If you are matching domain names, put a '$' at the end of the regex
+#     that matches the domain name.  This tells the regex matching code
+#     that the realm ENDS with the domain name, so it does not match
+#     realms with the domain name in the middle.  e.g. "~.*\\.example\\.net"
+#     will match "test.example.netFOO", which is likely not what you want.
+#     Using "~(.*\\.)example\\.net$" is better.
+#
+#  The more regex realms that are defined, the more time it takes to
+#  process them.  You should define as few regex realms as possible
+#  in order to maximize server performance.
+#
+#realm "~(.*\\.)*example\\.net$" {
+#      auth_pool = my_auth_failover
+#}
diff --git a/src/test/setup/radius-config/freeradius/radiusd.conf b/src/test/setup/radius-config/freeradius/radiusd.conf
new file mode 100644
index 0000000..327b10b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/radiusd.conf
@@ -0,0 +1,772 @@
+# -*- text -*-
+##
+## radiusd.conf	-- FreeRADIUS server configuration file - 3.0.3
+##
+##	http://www.freeradius.org/
+##	$Id: 307ae108f579b9c339e6ba819387ff7ad8baff87 $
+##
+
+######################################################################
+#
+#	Read "man radiusd" before editing this file.  See the section
+#	titled DEBUGGING.  It outlines a method where you can quickly
+#	obtain the configuration you want, without running into
+#	trouble.
+#
+#	Run the server in debugging mode, and READ the output.
+#
+#		$ radiusd -X
+#
+#	We cannot emphasize this point strongly enough.  The vast
+#	majority of problems can be solved by carefully reading the
+#	debugging output, which includes warnings about common issues,
+#	and suggestions for how they may be fixed.
+#
+#	There may be a lot of output, but look carefully for words like:
+#	"warning", "error", "reject", or "failure".  The messages there
+#	will usually be enough to guide you to a solution.
+#
+#	If you are going to ask a question on the mailing list, then
+#	explain what you are trying to do, and include the output from
+#	debugging mode (radiusd -X).  Failure to do so means that all
+#	of the responses to your question will be people telling you
+#	to "post the output of radiusd -X".
+
+######################################################################
+#
+#  	The locations of other config files and logfiles are declared
+#  	in this file.
+#
+#  	General configuration for modules can also be done in this
+#  	file; it is exported through the API to modules that ask for
+#  	it.
+#
+#	See "man radiusd.conf" for documentation on the format of this
+#	file.  Note that the individual configuration items are NOT
+#	documented in that "man" page.  They are only documented here,
+#	in the comments.
+#
+#	The "unlang" policy language can be used to create complex
+#	if / else policies.  See "man unlang" for details.
+#
+
+prefix = /usr
+exec_prefix = /usr
+sysconfdir = /etc
+localstatedir = /var
+sbindir = ${exec_prefix}/sbin
+logdir = /var/log/freeradius
+raddbdir = /etc/freeradius
+radacctdir = ${logdir}/radacct
+
+#
+#  name of the running server.  See also the "-n" command-line option.
+name = radiusd
+
+#  Location of config and logfiles.
+confdir = ${raddbdir}
+modconfdir = ${confdir}/mods-config
+certdir = ${confdir}/certs_2
+cadir   = ${confdir}/certs_2
+run_dir = ${localstatedir}/run/${name}
+
+# Should likely be ${localstatedir}/lib/radiusd
+db_dir = ${raddbdir}
+
+#
+# libdir: Where to find the rlm_* modules.
+#
+#   This should be automatically set at configuration time.
+#
+#   If the server builds and installs, but fails at execution time
+#   with an 'undefined symbol' error, then you can use the libdir
+#   directive to work around the problem.
+#
+#   The cause is usually that a library has been installed on your
+#   system in a place where the dynamic linker CANNOT find it.  When
+#   executing as root (or another user), your personal environment MAY
+#   be set up to allow the dynamic linker to find the library.  When
+#   executing as a daemon, FreeRADIUS MAY NOT have the same
+#   personalized configuration.
+#
+#   To work around the problem, find out which library contains that symbol,
+#   and add the directory containing that library to the end of 'libdir',
+#   with a colon separating the directory names.  NO spaces are allowed.
+#
+#   e.g. libdir = /usr/local/lib:/opt/package/lib
+#
+#   You can also try setting the LD_LIBRARY_PATH environment variable
+#   in a script which starts the server.
+#
+#   If that does not work, then you can re-configure and re-build the
+#   server to NOT use shared libraries, via:
+#
+#	./configure --disable-shared
+#	make
+#	make install
+#
+libdir = /usr/lib/freeradius
+
+#  pidfile: Where to place the PID of the RADIUS server.
+#
+#  The server may be signalled while it's running by using this
+#  file.
+#
+#  This file is written ONLY when running in daemon mode.
+#
+#  e.g.:  kill -HUP `cat /var/run/radiusd/radiusd.pid`
+#
+pidfile = ${run_dir}/${name}.pid
+
+#  panic_action: Command to execute if the server dies unexpectedly.
+#
+#  FOR PRODUCTION SYSTEMS, ACTIONS SHOULD ALWAYS EXIT.
+#  AN INTERACTIVE ACTION MEANS THE SERVER IS NOT RESPONDING TO REQUESTS.
+#  AN INTERACTIVE ACTION MEANS THE SERVER WILL NOT RESTART.
+#
+#  THE SERVER MUST NOT BE ALLOWED TO EXECUTE UNTRUSTED PANIC ACTION CODE
+#  PATTACH CAN BE USED AS AN ATTACK VECTOR.
+#
+#  The panic action is a command which will be executed if the server
+#  receives a fatal, non user generated signal, i.e. SIGSEGV, SIGBUS,
+#  SIGABRT or SIGFPE.
+#
+#  This can be used to start an interactive debugging session so
+#  that information regarding the current state of the server can
+#  be acquired.
+#
+#  The following string substitutions are available:
+#  - %e   The currently executing program e.g. /sbin/radiusd
+#  - %p   The PID of the currently executing program e.g. 12345
+#
+#  Standard ${} substitutions are also allowed.
+#
+#  An example panic action for opening an interactive session in GDB would be:
+#
+#panic_action = "gdb %e %p"
+#
+#  Again, don't use that on a production system.
+#
+#  An example panic action for opening an automated session in GDB would be:
+#
+#panic_action = "gdb -silent -x ${raddbdir}/panic.gdb %e %p 2>&1 | tee ${logdir}/gdb-${name}-%p.log"
+#
+#  That command can be used on a production system.
+#
+
+#  max_request_time: The maximum time (in seconds) to handle a request.
+#
+#  Requests which take more time than this to process may be killed, and
+#  a REJECT message is returned.
+#
+#  WARNING: If you notice that requests take a long time to be handled,
+#  then this MAY INDICATE a bug in the server, in one of the modules
+#  used to handle a request, OR in your local configuration.
+#
+#  This problem is most often seen when using an SQL database.  If it takes
+#  more than a second or two to receive an answer from the SQL database,
+#  then it probably means that you haven't indexed the database.  See your
+#  SQL server documentation for more information.
+#
+#  Useful range of values: 5 to 120
+#
+max_request_time = 30
+
+#  cleanup_delay: The time to wait (in seconds) before cleaning up
+#  a reply which was sent to the NAS.
+#
+#  The RADIUS request is normally cached internally for a short period
+#  of time, after the reply is sent to the NAS.  The reply packet may be
+#  lost in the network, and the NAS will not see it.  The NAS will then
+#  re-send the request, and the server will respond quickly with the
+#  cached reply.
+#
+#  If this value is set too low, then duplicate requests from the NAS
+#  MAY NOT be detected, and will instead be handled as separate requests.
+#
+#  If this value is set too high, then the server will cache too many
+#  requests, and some new requests may get blocked.  (See 'max_requests'.)
+#
+#  Useful range of values: 2 to 10
+#
+cleanup_delay = 5
+
+#  max_requests: The maximum number of requests which the server keeps
+#  track of.  This should be 256 multiplied by the number of clients.
+#  e.g. With 4 clients, this number should be 1024.
+#
+#  If this number is too low, then when the server becomes busy,
+#  it will not respond to any new requests, until the 'cleanup_delay'
+#  time has passed, and it has removed the old requests.
+#
+#  If this number is set too high, then the server will use a bit more
+#  memory for no real benefit.
+#
+#  If you aren't sure what it should be set to, it's better to set it
+#  too high than too low.  Setting it to 1000 per client is probably
+#  the highest it should be.
+#
+#  Useful range of values: 256 to infinity
+#
+max_requests = 1024
+
+#  hostname_lookups: Log the names of clients or just their IP addresses
+#  e.g., www.freeradius.org (on) or 206.47.27.232 (off).
+#
+#  The default is 'off' because it would be overall better for the net
+#  if people had to knowingly turn this feature on, since enabling it
+#  means that each client request will result in AT LEAST one lookup
+#  request to the nameserver.   Enabling hostname_lookups will also
+#  mean that your server may stop randomly for 30 seconds from time
+#  to time, if the DNS requests take too long.
+#
+#  Turning hostname lookups off also means that the server won't block
+#  for 30 seconds, if it sees an IP address which has no name associated
+#  with it.
+#
+#  allowed values: {no, yes}
+#
+hostname_lookups = no
+
+#
+#  Logging section.  The various "log_*" configuration items
+#  will eventually be moved here.
+#
+log {
+	#
+	#  Destination for log messages.  This can be one of:
+	#
+	#	files - log to "file", as defined below.
+	#	syslog - to syslog (see also the "syslog_facility", below).
+	#	stdout - standard output
+	#	stderr - standard error.
+	#
+	#  The command-line option "-X" over-rides this option, and forces
+	#  logging to go to stdout.
+	#
+	destination = files
+
+	#
+	#  Highlight important messages sent to stderr and stdout.
+	#
+	#  The option will be ignored (disabled) if TERM is not an
+	#  xterm, or if output is not to a TTY.
+	#
+	colourise = yes
+
+	#
+	#  The logging messages for the server are appended to the
+	#  tail of this file if destination == "files"
+	#
+	#  If the server is running in debugging mode, this file is
+	#  NOT used.
+	#
+	file = ${logdir}/radius.log
+
+	#
+	#  If this configuration parameter is set, then log messages for
+	#  a *request* go to this file, rather than to radius.log.
+	#
+	#  i.e. This is a log file per request, once the server has accepted
+	#  the request as being from a valid client.  Messages that are
+	#  not associated with a request still go to radius.log.
+	#
+	#  Not all log messages in the server core have been updated to use
+	#  this new internal API.  As a result, some messages will still
+	#  go to radius.log.  Please submit patches to fix this behavior.
+	#
+	#  The file name is expanded dynamically.  You should ONLY use
+	#  server-side attributes for the filename (e.g. things you control).
+	#  Using this feature MAY also slow down the server substantially,
+	#  especially if you do things like SQL calls as part of the
+	#  expansion of the filename.
+	#
+	#  The name of the log file should use attributes that don't change
+	#  over the lifetime of a request, such as User-Name,
+	#  Virtual-Server or Packet-Src-IP-Address.  Otherwise, the log
+	#  messages will be distributed over multiple files.
+	#
+	#  Logging can be enabled for an individual request by a special
+	#  dynamic expansion macro:  %{debug: 1}, where the debug level
+	#  for this request is set to '1' (or 2, 3, etc.).  e.g.
+	#
+	#	...
+	#	update control {
+	#	       Tmp-String-0 = "%{debug:1}"
+	#	}
+	#	...
+	#
+	#  The attribute that the value is assigned to is unimportant,
+	#  and should be a "throw-away" attribute with no side effects.
+	#
+	#requests = ${logdir}/radiusd-%{%{Virtual-Server}:-DEFAULT}-%Y%m%d.log
+
+	#
+	#  Which syslog facility to use, if ${destination} == "syslog"
+	#
+	#  The exact values permitted here are OS-dependent.  You probably
+	#  don't want to change this.
+	#
+	syslog_facility = daemon
+
+	#  Log the full User-Name attribute, as it was found in the request.
+	#
+	# allowed values: {no, yes}
+	#
+	stripped_names = no
+
+	#  Log authentication requests to the log file.
+	#
+	#  allowed values: {no, yes}
+	#
+	auth = no
+
+	#  Log passwords with the authentication requests.
+	#  auth_badpass  - logs password if it's rejected
+	#  auth_goodpass - logs password if it's correct
+	#
+	#  allowed values: {no, yes}
+	#
+	auth_badpass = no
+	auth_goodpass = no
+
+	#  Log additional text at the end of the "Login OK" messages.
+	#  For these to work, the "auth" and "auth_goodpass" or "auth_badpass"
+	#  configurations above have to be set to "yes".
+	#
+	#  The strings below are dynamically expanded, which means that
+	#  you can put anything you want in them.  However, note that
+	#  this expansion can be slow, and can negatively impact server
+	#  performance.
+	#
+#	msg_goodpass = ""
+#	msg_badpass = ""
+
+	#  The message when the user exceeds the Simultaneous-Use limit.
+	#
+	msg_denied = "You are already logged in - access denied"
+}
+
+#  The program to execute to do concurrency checks.
+checkrad = ${sbindir}/checkrad
+
+# SECURITY CONFIGURATION
+#
+#  There may be multiple methods of attacking the server.  This
+#  section holds the configuration items which minimize the impact
+#  of those attacks.
+#
+security {
+	#  chroot: directory where the server does "chroot".
+	#
+	#  The chroot is done very early in the process of starting
+	#  the server.  After the chroot has been performed it
+	#  switches to the "user" listed below (which MUST be
+	#  specified).  If "group" is specified, it switches to that
+	#  group, too.  Any other groups listed for the specified
+	#  "user" in "/etc/group" are also added as part of this
+	#  process.
+	#
+	#  The current working directory (chdir / cd) is left
+	#  *outside* of the chroot until all of the modules have been
+	#  initialized.  This allows the "raddb" directory to be left
+	#  outside of the chroot.  Once the modules have been
+	#  initialized, it does a "chdir" to ${logdir}.  This means
+	#  that it should be impossible to break out of the chroot.
+	#
+	#  If you are worried about security issues related to this
+	#  use of chdir, then simply ensure that the "raddb" directory
+	#  is inside of the chroot, and be sure to do "cd raddb"
+	#  BEFORE starting the server.
+	#
+	#  If the server is statically linked, then the only files
+	#  that have to exist in the chroot are ${run_dir} and
+	#  ${logdir}.  If you do the "cd raddb" as discussed above,
+	#  then the "raddb" directory has to be inside of the chroot
+	#  directory, too.
+	#
+#	chroot = /path/to/chroot/directory
+
+	# user/group: The name (or #number) of the user/group to run radiusd as.
+	#
+	#   If these are commented out, the server will run as the
+	#   user/group that started it.  In order to change to a
+	#   different user/group, you MUST be root ( or have root
+	#   privileges ) to start the server.
+	#
+	#   We STRONGLY recommend that you run the server with as few
+	#   permissions as possible.  That is, if you're not using
+	#   shadow passwords, the user and group items below should be
+	#   set to 'radius'.
+	#
+	#  NOTE that some kernels refuse to setgid(group) when the
+	#  value of (unsigned)group is above 60000; don't use group
+	#  "nobody" on these systems!
+	#
+	#  On systems with shadow passwords, you might have to set
+	#  'group = shadow' for the server to be able to read the
+	#  shadow password file.  If you can authenticate users while
+	#  in debug mode, but not in daemon mode, it may be that the
+	#  debugging mode server is running as a user that can read
+	#  the shadow info, and the user listed below can not.
+	#
+	#  The server will also try to use "initgroups" to read
+	#  /etc/groups.  It will join all groups where "user" is a
+	#  member.  This can allow for some finer-grained access
+	#  controls.
+	#
+#	user = radius
+#	group = radius
+
+	#  Core dumps are a bad thing.  This should only be set to
+	#  'yes' if you're debugging a problem with the server.
+	#
+	#  allowed values: {no, yes}
+	#
+	allow_core_dumps = no
+
+	#
+	#  max_attributes: The maximum number of attributes
+	#  permitted in a RADIUS packet.  Packets which have MORE
+	#  than this number of attributes in them will be dropped.
+	#
+	#  If this number is set too low, then no RADIUS packets
+	#  will be accepted.
+	#
+	#  If this number is set too high, then an attacker may be
+	#  able to send a small number of packets which will cause
+	#  the server to use all available memory on the machine.
+	#
+	#  Setting this number to 0 means "allow any number of attributes"
+	max_attributes = 200
+
+	#
+	#  reject_delay: When sending an Access-Reject, it can be
+	#  delayed for a few seconds.  This may help slow down a DoS
+	#  attack.  It also helps to slow down people trying to brute-force
+	#  crack a users password.
+	#
+	#  Setting this number to 0 means "send rejects immediately"
+	#
+	#  If this number is set higher than 'cleanup_delay', then the
+	#  rejects will be sent at 'cleanup_delay' time, when the request
+	#  is deleted from the internal cache of requests.
+	#
+	#  Useful ranges: 1 to 5
+	reject_delay = 1
+
+	#
+	#  status_server: Whether or not the server will respond
+	#  to Status-Server requests.
+	#
+	#  When sent a Status-Server message, the server responds with
+	#  an Access-Accept or Accounting-Response packet.
+	#
+	#  This is mainly useful for administrators who want to "ping"
+	#  the server, without adding test users, or creating fake
+	#  accounting packets.
+	#
+	#  It's also useful when a NAS marks a RADIUS server "dead".
+	#  The NAS can periodically "ping" the server with a Status-Server
+	#  packet.  If the server responds, it must be alive, and the
+	#  NAS can start using it for real requests.
+	#
+	#  See also raddb/sites-available/status
+	#
+	status_server = yes
+
+	#
+	#  allow_vulnerable_openssl: Allow the server to start with
+	#  versions of OpenSSL known to have critical vulnerabilities.
+	#
+	#  This check is based on the version number reported by libssl
+	#  and may not reflect patches applied to libssl by
+	#  distribution maintainers.
+	#
+	allow_vulnerable_openssl = yes
+}
+
+# PROXY CONFIGURATION
+#
+#  proxy_requests: Turns proxying of RADIUS requests on or off.
+#
+#  The server has proxying turned on by default.  If your system is NOT
+#  set up to proxy requests to another server, then you can turn proxying
+#  off here.  This will save a small amount of resources on the server.
+#
+#  If you have proxying turned off, and your configuration files say
+#  to proxy a request, then an error message will be logged.
+#
+#  To disable proxying, change the "yes" to "no", and comment the
+#  $INCLUDE line.
+#
+#  allowed values: {no, yes}
+#
+proxy_requests  = yes
+$INCLUDE proxy.conf
+
+
+# CLIENTS CONFIGURATION
+#
+#  Client configuration is defined in "clients.conf".
+#
+
+#  The 'clients.conf' file contains all of the information from the old
+#  'clients' and 'naslist' configuration files.  We recommend that you
+#  do NOT use 'clients' or 'naslist', although they are still
+#  supported.
+#
+#  Anything listed in 'clients.conf' will take precedence over the
+#  information from the old-style configuration files.
+#
+$INCLUDE clients.conf
+
+
+# THREAD POOL CONFIGURATION
+#
+#  The thread pool is a long-lived group of threads which
+#  take turns (round-robin) handling any incoming requests.
+#
+#  You probably want to have a few spare threads around,
+#  so that high-load situations can be handled immediately.  If you
+#  don't have any spare threads, then the request handling will
+#  be delayed while a new thread is created, and added to the pool.
+#
+#  You probably don't want too many spare threads around,
+#  otherwise they'll be sitting there taking up resources, and
+#  not doing anything productive.
+#
+#  The numbers given below should be adequate for most situations.
+#
+thread pool {
+	#  Number of servers to start initially --- should be a reasonable
+	#  ballpark figure.
+	start_servers = 5
+
+	#  Limit on the total number of servers running.
+	#
+	#  If this limit is ever reached, clients will be LOCKED OUT, so it
+	#  should NOT BE SET TOO LOW.  It is intended mainly as a brake to
+	#  keep a runaway server from taking the system with it as it spirals
+	#  down...
+	#
+	#  You may find that the server is regularly reaching the
+	#  'max_servers' number of threads, and that increasing
+	#  'max_servers' doesn't seem to make much difference.
+	#
+	#  If this is the case, then the problem is MOST LIKELY that
+	#  your back-end databases are taking too long to respond, and
+	#  are preventing the server from responding in a timely manner.
+	#
+	#  The solution is NOT to keep increasing the 'max_servers'
+	#  value, but instead to fix the underlying cause of the
+	#  problem: slow database, or 'hostname_lookups=yes'.
+	#
+	#  For more information, see 'max_request_time', above.
+	#
+	max_servers = 32
+
+	#  Server-pool size regulation.  Rather than making you guess
+	#  how many servers you need, FreeRADIUS dynamically adapts to
+	#  the load it sees, that is, it tries to maintain enough
+	#  servers to handle the current load, plus a few spare
+	#  servers to handle transient load spikes.
+	#
+	#  It does this by periodically checking how many servers are
+	#  waiting for a request.  If there are fewer than
+	#  min_spare_servers, it creates a new spare.  If there are
+	#  more than max_spare_servers, some of the spares die off.
+	#  The default values are probably OK for most sites.
+	#
+	min_spare_servers = 3
+	max_spare_servers = 10
+
+	#  When the server receives a packet, it places it onto an
+	#  internal queue, where the worker threads (configured above)
+	#  pick it up for processing.  The maximum size of that queue
+	#  is given here.
+	#
+	#  When the queue is full, any new packets will be silently
+	#  discarded.
+	#
+	#  The most common cause of the queue being full is that the
+	#  server is dependent on a slow database, and it has received
+	#  a large "spike" of traffic.  When that happens, there is
+	#  very little you can do other than make sure the server
+	#  receives less traffic, or make sure that the database can
+	#  handle the load.
+	#
+#	max_queue_size = 65536
+
+	#  There may be memory leaks or resource allocation problems with
+	#  the server.  If so, set this value to 300 or so, so that the
+	#  resources will be cleaned up periodically.
+	#
+	#  This should only be necessary if there are serious bugs in the
+	#  server which have not yet been fixed.
+	#
+	#  '0' is a special value meaning 'infinity', or 'the servers never
+	#  exit'
+	max_requests_per_server = 0
+
+	#  Automatically limit the number of accounting requests.
+	#  This configuration item tracks how many requests per second
+	#  the server can handle.  It does this by tracking the
+	#  packets/s received by the server for processing, and
+	#  comparing that to the packets/s handled by the child
+	#  threads.
+	#
+
+	#  If the received PPS is larger than the processed PPS, *and*
+	#  the queue is more than half full, then new accounting
+	#  requests are probabilistically discarded.  This lowers the
+	#  number of packets that the server needs to process.  Over
+	#  time, the server will "catch up" with the traffic.
+	#
+	#  Throwing away accounting packets is usually safe and low
+	#  impact.  The NAS will retransmit them in a few seconds, or
+	#  even a few minutes.  Vendors should read RFC 5080 Section 2.2.1
+	#  to see how accounting packets should be retransmitted.  Using
+	#  any other method is likely to cause network meltdowns.
+	#
+	auto_limit_acct = no
+}
+
+# MODULE CONFIGURATION
+#
+#  The names and configuration of each module is located in this section.
+#
+#  After the modules are defined here, they may be referred to by name,
+#  in other sections of this configuration file.
+#
+modules {
+	#
+	#  Each module has a configuration as follows:
+	#
+	#	name [ instance ] {
+	#		config_item = value
+	#		...
+	#	}
+	#
+	#  The 'name' is used to load the 'rlm_name' library
+	#  which implements the functionality of the module.
+	#
+	#  The 'instance' is optional.  To have two different instances
+	#  of a module, it first must be referred to by 'name'.
+	#  The different copies of the module are then created by
+	#  inventing two 'instance' names, e.g. 'instance1' and 'instance2'
+	#
+	#  The instance names can then be used in later configuration
+	#  INSTEAD of the original 'name'.  See the 'radutmp' configuration
+	#  for an example.
+	#
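+	#  e.g. a sketch of two instances (the instance names "sql1" and
+	#  "sql2" below are only illustrative):
+	#
+	#	sql sql1 {
+	#		...
+	#	}
+	#	sql sql2 {
+	#		...
+	#	}
+	#
+	#  "sql1" and "sql2" can then be listed by those names in the
+	#  processing sections.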
+
+	#
+	#  As of 3.0, modules are in mods-enabled/.  Files matching
+	#  the regex /[a-zA-Z0-9_.]+/ are loaded.  The modules are
+	#  initialized ONLY if they are referenced in a processing
+	#  section, such as authorize, authenticate, accounting,
+	#  pre/post-proxy, etc.
+	#
+	$INCLUDE mods-enabled/
+}
+
+# Instantiation
+#
+#  This section orders the loading of the modules.  Modules
+#  listed here will get loaded BEFORE the later sections like
+#  authorize, authenticate, etc. get examined.
+#
+#  This section is not strictly needed.  When a section like
+#  authorize refers to a module, it's automatically loaded and
+#  initialized.  However, some modules may not be listed in any
+#  of the following sections, so they can be listed here.
+#
+#  Also, listing modules here ensures that you have control over
+#  the order in which they are initialized.  If one module needs
+#  something defined by another module, you can list them in order
+#  here, and ensure that the configuration will be OK.
+#
+#  After the modules listed here have been loaded, all of the modules
+#  in the "mods-enabled" directory will be loaded.  Loading the
+#  "mods-enabled" directory means that unlike Version 2, you usually
+#  don't need to list modules here.
+#
+instantiate {
+	#
+	# We list the counter module here so that it registers
+	# the check_name attribute before any module which sets
+	# it
+#	daily
+
+	# subsections here can be thought of as "virtual" modules.
+	#
+	# e.g. If you have two redundant SQL servers, and you want to
+	# use them in the authorize and accounting sections, you could
+	# place a "redundant" block in each section, containing the
+	# exact same text.  Or, you could uncomment the following
+	# lines, and list "redundant_sql" in the authorize and
+	# accounting sections.
+	#
+	#redundant redundant_sql {
+	#	sql1
+	#	sql2
+	#}
+}
+
+######################################################################
+#
+#  Policies are virtual modules, similar to those defined in the
+#  "instantiate" section above.
+#
+#  Defining a policy in one of the policy.d files means that it can be
+#  referenced in multiple places as a *name*, rather than as a series of
+#  conditions to match, and actions to take.
+#
+#  Policies are something like subroutines in a normal language, but
+#  they cannot be called recursively. They MUST be defined in order.
+#  If policy A calls policy B, then B MUST be defined before A.
+#
+######################################################################
+policy {
+	$INCLUDE policy.d/
+}
+
+######################################################################
+#
+#  SNMP notifications.  Uncomment the following line to enable
+#  snmptraps.  Note that you MUST also configure the full path
+#  to the "snmptrap" command in the "trigger.conf" file.
+#
+#$INCLUDE trigger.conf
+
+######################################################################
+#
+#	Load virtual servers.
+#
+#	This next $INCLUDE line loads files in the directory that
+#	match the regular expression: /[a-zA-Z0-9_.]+/
+#
+#	It allows you to define new virtual servers simply by placing
+#	a file into the raddb/sites-enabled/ directory.
+#
+$INCLUDE sites-enabled/
+
+######################################################################
+#
+#	All of the other configuration sections like "authorize {}",
+#	"authenticate {}", "accounting {}", have been moved to the
+#	the file:
+#
+#		raddb/sites-available/default
+#
+#	This is the "default" virtual server that has the same
+#	configuration as in version 1.0.x and 1.1.x.  The default
+#	installation enables this virtual server.  You should
+#	edit it to create policies for your local site.
+#
+#	For more documentation on virtual servers, see:
+#
+#		raddb/sites-available/README
+#
+######################################################################
diff --git a/src/test/setup/radius-config/freeradius/sites-available/README b/src/test/setup/radius-config/freeradius/sites-available/README
new file mode 100644
index 0000000..55036f0
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/README
@@ -0,0 +1,335 @@
+1.  Virtual Servers.
+
+  FreeRADIUS 2.0 supports virtual servers.  This is probably the
+single largest change that is NOT backwards compatible with 1.x.
+
+  The virtual servers do NOT have to be set up with the
+"sites-available" and "sites-enabled" directories.  You can still have
+one "radiusd.conf" file, and put the server configuration there:
+
+	...
+	server {
+		authorize {
+			...
+		}
+		authenticate {
+			...
+		}
+		...
+	}
+	...
+
+  The power of virtual servers lies in their ability to separate
+policies.  A policy can be placed into a virtual server, where it is
+guaranteed to affect only the requests that are passed through that
+virtual server.  In 1.x, the policies were global, and it sometimes
+took much effort to write a policy so that it only applied in certain
+limited situations.
+
+
+2.  What do we mean by "virtual server"?
+
+
+  A virtual server is a (nearly complete) RADIUS server, just like a
+configuration for FreeRADIUS 1.x.  However, FreeRADIUS can now run
+multiple virtual servers at the same time.  The virtual servers can
+even proxy requests to each other!
+
+  The simplest way to create a virtual server is to take all of
+the request processing sections from radiusd.conf ("authorize",
+"authenticate", etc.) and wrap them in a "server {}" block, as above.
+
+  You can create another virtual server by:
+
+    1) defining a new "server foo {...}" section in radiusd.conf
+    2) Putting the normal "authorize", etc. sections inside of it
+    3) Adding a "listen" section *inside* of the "server" section.
+
+  e.g.
+
+	...
+	server foo {
+		listen {
+			ipaddr = 127.0.0.1
+			port = 2000
+			type = auth
+		}
+
+		authorize {
+			update control {
+				Cleartext-Password := "bob"
+			}
+			pap
+		}
+
+		authenticate {
+			pap
+		}
+	}
+	...
+
+  With that text added to "radiusd.conf", run the server in debugging
+mode (radiusd -X), and in another terminal window, type:
+
+$ radtest bob bob localhost:2000 0 testing123
+
+  You should see the server return an Access-Accept.
+
+
+3. Capabilities and limitations
+
+
+  The only sub-sections that can appear in a virtual server section
+are:
+
+	listen
+	client
+	authorize
+	authenticate
+	post-auth
+	pre-proxy
+	post-proxy
+	preacct
+	accounting
+	session
+
+  All other configuration parameters (modules, etc.) are global.
+
+  Inside of a virtual server, the authorize, etc. sections have their
+normal meaning, and can contain anything that an authorize section
+could contain in 1.x.
+
+  When a "listen" section is inside of a virtual server definition, it
+means that all requests sent to that IP/port will be processed through
+the virtual server.  There cannot be two "listen" sections with the
+same IP address and port number.
+
+  When a "client" section is inside of a virtual server definition, it
+means that that client is known only to the "listen" sections that are
+also inside of that virtual server.  Not only is this client
+definition available only to this virtual server, but the details of
+the client configuration are also available only to this virtual
+server.
+
+  i.e. Two virtual servers can listen on different IP address and
+ports, but both can have a client with IP address 127.0.0.1.  The
+shared secret for that client can be different for each virtual
+server.
+
+
+4. More complex "listen" capabilities
+
+  The "listen" sections have a few additional configuration items that
+were not in 1.x, and were not mentioned above.  These configuration
+items enable almost any mapping of IP / port to clients to virtual
+servers.
+
+  The configuration items are:
+
+	virtual_server = <name>
+
+		If set, all requests sent to this IP / port are processed
+		through the named virtual server.
+
+		This directive can be used only for "listen" sections
+		that are global.  i.e. It CANNOT be used if the
+		"listen" section is inside of a virtual server.
+
+	clients = <name>
+
+		If set, the "listen" section looks for a "clients" section:
+
+			clients <name> {
+				...
+			}
+
+		It looks inside of that named "clients" section for
+		"client" subsections, at least one of which must
+		exist.  Each client in that section is added to the
+		list of known clients for this IP / port.  No other
+		clients are known.
+
+		If it is set, it over-rides the list of clients (if
+		any) in the same virtual server.  Note that the
+		clients are NOT additive!
+
+		If it is not set, then the clients from the current
+		virtual server (if any) are used.  If there are no
+		clients in this virtual server, then the global
+		clients are used.
+
+		i.e. The most specific directive is used:
+			* configuration in this "listen" section
+			* clients in the same virtual server
+			* global clients
+
+		The directives are also *exclusive*, not *additive*.
+		If you have one client in a virtual server, and
+		another client referenced from a "listen" section,
+		then that "listen" section will ONLY use the second
+		client.  It will NOT use both clients.
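+
+		For example (an illustrative sketch; the client and
+		section names here are made up):
+
+			listen {
+				type = auth
+				ipaddr = *
+				port = 1812
+				clients = branch-offices
+			}
+
+			clients branch-offices {
+				client nas1 {
+					ipaddr = 192.0.2.10
+					secret = testing123
+				}
+			}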
+
+
+5. More complex "client" capabilities
+
+  The "client" sections have a few additional configuration items that
+were not in 1.x, and were not mentioned above.  These configuration
+items enable almost any mapping of IP / port to clients to virtual
+servers.
+
+  The configuration items are:
+
+	virtual_server = <name>
+
+		If set, all requests from this client are processed
+		through the named virtual server.
+
+		This directive can be used only for "client" sections
+		that are global.  i.e. It CANNOT be used if the
+		"client" section is inside of a virtual server.
+
+  If the "listen" section has a "server" entry, and a matching
+client is found ALSO with a "server" entry, then the client's server is
+used for that request.
+
+
+6. Worked examples
+
+
+  Listening on one socket, and mapping requests from two clients to
+two different servers.
+
+	listen {
+		...
+	}
+	client one {
+		...
+		virtual_server = server_one
+	}
+	client two {
+		...
+		virtual_server = server_two
+	}
+	server server_one {
+		authorize {
+			...
+		}
+		...
+	}
+	server server_two {
+		authorize {
+			...
+		}
+		...
+	}
+
+  This could also be done as:
+
+
+	listen {
+		...
+		virtual_server = server_one
+	}
+	client one {
+		...
+	}
+	client two {
+		...
+		virtual_server = server_two
+	}
+	server server_one {
+		authorize {
+			...
+		}
+		...
+	}
+	server server_two {
+		authorize {
+			...
+		}
+		...
+	}
+
+  In this case, the default server for the socket is "server_one", so
+there is no need to set that in the client "one" configuration.  The
+"server_two" configuration for client "two" over-rides the default
+setting for the socket.
+
+  Note that the following configuration will NOT work:
+
+	listen {
+		...
+		virtual_server = server_one
+	}
+	client one {
+		...
+	}
+	server server_one {
+		authorize {
+			...
+		}
+		...
+	}
+	server server_two {
+		client two {
+			...
+		}
+		authorize {
+			...
+		}
+		...
+	}
+
+  In this example, client "two" is hidden inside of the virtual
+server, where the "listen" section cannot find it.
+
+
+7. Outlined examples
+
+  This section outlines a number of examples, with alternatives.
+
+  One server, multiple sockets
+	- multiple "listen" sections in a "server" section
+
+  one server per client
+	- define multiple servers
+	- have a global "listen" section
+	- have multiple global "clients", each with "virtual_server = X"
+
+  two servers, each with their own sockets
+	- define multiple servers
+	- put "client" sections into each "server"
+	- put a "listen" section into each "server"
+
+	Each server can list the same client IP, and the secret
+	can be different
+
+  two sockets, sharing a list of clients, but pointing to different servers
+	- define global "listen" sections
+	- in each, set "virtual_server = X"
+	- in each, set "clients = Y"
+	- define "clients Y" section, containing multiple clients.
+
+	This also means that you can have a third socket, which
+	doesn't share any of these clients.
+
+
+8.  How to decide what to do
+
+
+  If you want *completely* separate policies for a socket or a client,
+then create a separate virtual server.  Then, map the request to that
+server by setting configuration entries in a "listen" section or in a
+"client" section.
+
+  Start off with the common cases first.  If most of the clients
+and/or sockets get a particular policy, make that policy the default.
+Configure it without paying attention to the sockets or clients you
+want to add later, and without adding a second virtual server.  Once
+it works, then add the second virtual server.
+
+  If you want to re-use the previously defined sockets with the second
+virtual server, then you will need one or more global "client"
+sections.  Those clients will contain a "virtual_server = ..." entry
+that will direct requests from those clients to the appropriate
+virtual server.
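+
+  For example (again, just a sketch; the names are made up):
+
+	client special-nas {
+		ipaddr = 192.0.2.20
+		secret = testing123
+		virtual_server = special-policy
+	}
+
+	server special-policy {
+		authorize {
+			...
+		}
+		...
+	}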
diff --git a/src/test/setup/radius-config/freeradius/sites-available/buffered-sql b/src/test/setup/radius-config/freeradius/sites-available/buffered-sql
new file mode 100644
index 0000000..4217d99
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/buffered-sql
@@ -0,0 +1,129 @@
+# -*- text -*-
+######################################################################
+#
+#	In 2.0.0, radrelay functionality is integrated into the
+#	server core.  This virtual server gives an example of
+#	using radrelay functionality inside of the server.
+#
+#	In this example, the detail file is read, and the data
+#	is put into SQL.  This configuration is used when a RADIUS
+#	server on this machine is receiving accounting packets,
+#	and writing them to the detail file.
+#
+#	The purpose of this virtual server is to de-couple the storage
+#	of long-term accounting data in SQL from "live" information
+#	needed by the RADIUS server as it is running.
+#
+#	The benefit of this approach is that for a busy server, the
+#	overhead of performing SQL queries may be significant.  Also,
+#	if the SQL databases are large (as is typical for ones storing
+#	months of data), the INSERTs and UPDATEs may take a relatively
+#	long time.  Rather than slowing down the RADIUS server by
+#	having it interact with a database, you can just log the
+#	packets to a detail file, and then read that file later at a
+#	time when the RADIUS server is typically lightly loaded.
+#
+#	If you use one virtual server to log to the detail file,
+#	and another virtual server (i.e. this one) to read from
+#	the detail file, then this process will happen automatically.
+#	A sudden spike of RADIUS traffic means that the detail file
+#	will grow in size, and the server will be able to handle
+#	large volumes of traffic quickly.  When the traffic dies down,
+#	the server will have time to read the detail file, and insert
+#	the data into a long-term SQL database.
+#
+#	$Id: bc5abe8e104accca792de61201c741d07e825894 $
+#
+######################################################################
+
+server buffered-sql {
+	listen {
+		type = detail
+
+		#  The location where the detail file is located.
+		#  This should be on local disk, and NOT on an NFS
+		#  mounted location!
+		filename = "${radacctdir}/detail-*"
+
+		#
+		#  The server can read accounting packets from the
+		#  detail file much more quickly than those packets
+		#  can be written to a database.  If the database is
+		#  overloaded, then bad things can happen.
+		#
+		#  The server will keep track of how long it takes to
+		#  process an entry from the detail file.  It will
+		#  then pause between handling entries.  This pause
+		#  allows databases to "catch up", and gives the
+		#  server time to notice that other packets may have
+		#  arrived.
+		#
+		#  The pause is calculated dynamically, to ensure that
+		#  the load due to reading the detail files is limited
+		#  to a small percentage of CPU time.  The
+		#  "load_factor" configuration item is a number
+		#  between 1 and 100.  The server will try to keep the
+		#  percentage of time taken by "detail" file entries
+		#  to "load_factor" percentage of the CPU time.
+		#
+		#  If the "load_factor" is set to 100, then the server
+		#  will read packets as fast as it can, usually
+		#  causing databases to go into overload.
+		#
+		load_factor = 10
+
+		#
+		#  Set the interval for polling the detail file.
+		#  If the detail file doesn't exist, the server will
+		#  wake up, and poll for it every N seconds.
+		#
+		#  Useful range of values: 1 to 60
+		poll_interval = 1
+
+		#
+		#  Set the retry interval for when the home server
+		#  does not respond.  The current packet will be
+		#  sent repeatedly, at this interval, until the
+		#  home server responds.
+		#
+		#  Useful range of values: 5 to 30
+		retry_interval = 30
+
+	}
+
+	#
+	#  Pre-accounting.  Decide which accounting type to use.
+	#
+	preacct {
+		preprocess
+
+		#
+		#  Ensure that we have a semi-unique identifier for every
+		#  request, as many NAS boxes are broken.
+		acct_unique
+
+		#
+		#  Read the 'acct_users' file.  This isn't always
+		#  necessary, and can be deleted if you do not use it.
+		files
+	}
+
+	#
+	#  Accounting.  Log the accounting data.
+	#
+	accounting {
+		#
+		#  Log traffic to an SQL database.
+		#
+		#  See "Accounting queries" in sql.conf
+	#	sql
+
+
+		#  Cisco VoIP specific bulk accounting
+	#	pgsql-voip
+
+	}
+
+	# The requests are not being proxied, so no pre/post-proxy
+	# sections are necessary.
+}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/check-eap-tls b/src/test/setup/radius-config/freeradius/sites-available/check-eap-tls
new file mode 100644
index 0000000..d84378f
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/check-eap-tls
@@ -0,0 +1,131 @@
+# This virtual server allows EAP-TLS to reject access requests
+# based on some certificate attributes.
+#
+# Value-pairs that are available for checking include:
+#
+#   TLS-Client-Cert-Subject
+#   TLS-Client-Cert-Issuer
+#   TLS-Client-Cert-Common-Name
+#   TLS-Client-Cert-Subject-Alt-Name-Email
+#
+# To see a full list of attributes, run the server in debug mode
+# with this virtual server configured, and look at the attributes
+# passed in to this virtual server.
+#
+#
+# This virtual server is also useful when using EAP-TLS as it is only called
+# once, just before the final Accept is about to be returned from eap, whereas
+# the outer authorize section is called multiple times for each challenge /
+# response. For this reason, here may be a good location to put authentication
+# logging, and modules that check for further authorization, especially if they
+# hit external services such as sql or ldap.
+
+server check-eap-tls {
+
+
+# Authorize - this is the only section required.
+#
+# To accept the access request, set Auth-Type = Accept, otherwise
+# set it to Reject.
+
+authorize {
+
+	#
+	# By default, we just accept the request:
+	#
+	update config {
+		Auth-Type := Accept
+	}
+
+
+	#
+	# Check the client certificate matches a string, and reject otherwise
+	#
+
+#	if ("%{TLS-Client-Cert-Common-Name}" == "client.example.com") {
+#		update config {
+#			Auth-Type := Accept
+#		}
+#	}
+#	else {
+#		update config {
+#			Auth-Type := Reject
+#		}
+#		update reply {
+#			Reply-Message := "Your certificate is not valid."
+#		}
+#	}
+
+
+	#
+	# Check the client certificate common name against the supplied User-Name
+	#
+#	if ("host/%{TLS-Client-Cert-Common-Name}" == "%{User-Name}") {
+#		update config {
+#			Auth-Type := Accept
+#		}
+#	}
+#	else {
+#		update config {
+#			Auth-Type := Reject
+#		}
+#	}
+
+
+	#
+	# This is a convenient place to call LDAP, for example, when using
+	# EAP-TLS, as it will only be called once, after all certificates as
+	# part of the EAP-TLS challenge process have been verified.
+	#
+	# An example could be to use LDAP to check that the connecting host, as
+	# well as presenting a valid certificate, is also in a group based on
+	# the User-Name (assuming this contains the service principal name).
+	# Settings such as the following could be used in the ldap module
+	# configuration:
+	#
+	# basedn = "dc=example, dc=com"
+	# filter = "(servicePrincipalName=%{User-Name})"
+	# base_filter = "(objectClass=computer)"
+	# groupname_attribute = cn
+	# groupmembership_filter = "(&(objectClass=group)(member=%{control:Ldap-UserDn}))"
+
+#	ldap
+
+	# Now let's test membership of an LDAP group (the ldap bind user will
+	# need permission to read this group membership):
+
+#	if (!(Ldap-Group == "Permitted-Laptops")) {
+#		update config {
+#			Auth-Type := Reject
+#		}
+#	}
+
+	# or, to be more specific, you could use the group's full DN:
+	# if (!(Ldap-Group == "CN=Permitted-Laptops,OU=Groups,DC=example,DC=org")) {
+
+
+	#
+	# This may be a better place to call the files modules when using
+	# EAP-TLS, as it will only be called once, after the challenge-response
+	# iteration has completed.
+	#
+
+#	files
+
+
+	#
+	# Log all request attributes, plus TLS certificate details, to the
+	# detail auth_log. Again, this is just once per connection request, so
+	# may be preferable to logging in the outer authorize section. It is
+	# suggested that 'auth_log' also be in the outer post-auth and
+	# Post-Auth REJECT sections to log reply packet details, too.
+	#
+
+	auth_log
+
+}
+
+
+
+}
+
diff --git a/src/test/setup/radius-config/freeradius/sites-available/coa b/src/test/setup/radius-config/freeradius/sites-available/coa
new file mode 100644
index 0000000..66caa31
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/coa
@@ -0,0 +1,49 @@
+# -*- text -*-
+######################################################################
+#
+#  Sample virtual server for receiving a CoA or Disconnect-Request packet.
+#
+
+#  Listen on the CoA port.
+#
+#  This uses the normal set of clients, with the same secret as for
+#  authentication and accounting.
+#
+listen {
+	type = coa
+	ipaddr = *
+	port = 3799
+	server = coa
+}
+
+server coa {
+	#  When a packet is received, it is processed through the
+	#  recv-coa section.  This applies to *both* CoA-Request and
+	#  Disconnect-Request packets.
+	recv-coa {
+		#  CoA && Disconnect packets can be proxied in the same
+		#  way as authentication or accounting packets.
+		#  Just set Proxy-To-Realm, or Home-Server-Pool, and the
+		#  packets will be proxied.
+
+		#  Do proxying based on realms here.  You don't need
+		#  "IPASS" or "ntdomain", as the proxying is based on
+		#  the Operator-Name attribute.  It contains the realm,
+		#  and ONLY the realm (prefixed by a '1')
+		suffix
+
+		#  Insert your own policies here.
+		ok
+	}
+
+	#  When a packet is sent, it is processed through the
+	#  send-coa section.  This applies to *both* CoA-Request and
+	#  Disconnect-Request packets.
+	send-coa {
+		#  Sample module.
+		ok
+	}
+
+	#  You can use pre-proxy and post-proxy sections here, too.
+	#  They will be processed for sending && receiving proxy packets.
+}
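+
+#  To exercise this virtual server, a Disconnect-Request can be sent with
+#  radclient, e.g. (illustrative only; adjust the address, port, and shared
+#  secret to match your clients.conf):
+#
+#	echo "User-Name = bob" | radclient 127.0.0.1:3799 disconnect testing123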
diff --git a/src/test/setup/radius-config/freeradius/sites-available/control-socket b/src/test/setup/radius-config/freeradius/sites-available/control-socket
new file mode 100644
index 0000000..c3f813d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/control-socket
@@ -0,0 +1,73 @@
+# -*- text -*-
+######################################################################
+#
+#	Control socket interface.
+#
+#	In the future, we will add username/password checking for
+#	connections to the control socket.  We will also add
+#	command authorization, where the commands entered by the
+#	administrator are run through a virtual server before
+#	they are executed.
+#
+#	For now, anyone who has permission to connect to the socket
+#	has nearly complete control over the server.  Be warned!
+#
+#	This functionality is NOT enabled by default.
+#
+#	See also the "radmin" program, which is used to communicate
+#	with the server over the control socket.
+#
+#	$Id: 8d06082d3a8fba31bb1471aef19e28093cee4a9e $
+#
+######################################################################
+listen {
+	#
+	#  Listen on the control socket.
+	#
+	type = control
+
+	#
+	#  Socket location.
+	#
+	#  This file is created with the server's uid and gid.
+	#  Its permissions are r/w for that user and group, and
+	#  no permissions for "other" users.  These permissions form
+	#  minimal security, and should not be relied on.
+	#
+	socket = ${run_dir}/${name}.sock
+
+	#
+	#  The following two parameters perform authentication and
+	#  authorization of connections to the control socket.
+	#
+	#  If not set, then ANYONE can connect to the control socket,
+	#  and have complete control over the server.  This is likely
+	#  not what you want.
+	#
+	#  One, or both, of "uid" and "gid" should be set.  If set, the
+	#  corresponding value is checked.  Unauthorized users result
+	#  in an error message in the log file, and the connection is
+	#  closed.
+	#
+
+	#
+	#  Name of user that is allowed to connect to the control socket.
+	#
+#	uid = radius
+
+	#
+	#  Name of group that is allowed to connect to the control socket.
+	#
+#	gid = radius
+
+	#
+	#  Access mode.
+	#
+	#  This can be used to give *some* administrators access to
+	#  monitor the system, but not to change it.
+	#
+	#	ro = read only access (default)
+	#	rw = read/write access.
+	#
+#	mode = rw
+}
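+
+#	Once enabled, the server can be administered over this socket with
+#	the "radmin" tool, e.g. (illustrative; the socket path follows the
+#	"socket" setting above):
+#
+#		radmin -f /var/run/radiusd/radiusd.sock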
diff --git a/src/test/setup/radius-config/freeradius/sites-available/copy-acct-to-home-server b/src/test/setup/radius-config/freeradius/sites-available/copy-acct-to-home-server
new file mode 100644
index 0000000..5f962f8
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/copy-acct-to-home-server
@@ -0,0 +1,169 @@
+# -*- text -*-
+######################################################################
+#
+#	In 2.0.0, radrelay functionality is integrated into the
+#	server core.  This virtual server gives an example of
+#	using radrelay functionality inside of the server.
+#
+#	In this example, the detail file is read, and the packets
+#	are proxied to a home server.  You will have to configure
+#	realms, home_server_pool, and home_server in proxy.conf
+#	for this to work.
+#
+#	The purpose of this virtual server is to enable duplication
+#	of information across a load-balanced, or fail-over set of
+#	servers.  For example, if a group of clients lists two
+#	home servers (primary, secondary), then RADIUS accounting
+#	messages will go only to one server at a time.  This file
+#	configures a server (primary, secondary) to send copies of
+#	the accounting information to each other.
+#
+#	That way, each server has the same set of information, and
+#	can make the same decision about the user.
+#
+#	$Id: 2869287260929f35d1a575b52014de20ce6cf3bb $
+#
+######################################################################
+
+server copy-acct-to-home-server {
+	listen {
+		type = detail
+
+		######################################################
+		#
+		#  !!!! WARNING !!!!
+		#
+		#  The detail file reader acts just like a NAS.
+		#
+		#  This means that if accounting fails, the packet
+		#  is re-tried FOREVER.  It is YOUR responsibility
+		#  to write an accounting policy that returns "ok"
+		#  if the packet was processed properly, "fail" on
+		#  a database error, AND "ok" if you want to ignore
+		#  the packet (e.g. no Acct-Status-Type).
+		#
+		#  Neither the detail file writer NOR the detail file
+		#  reader looks at the contents of the packets.  They
+		#  just either dump the packet verbatim to the file,
+		#  or read it verbatim from the file and pass it to
+		#  the server.
+		#
+		######################################################
+
+
+		#  The location where the detail file is located.
+		#  This should be on local disk, and NOT on an NFS
+		#  mounted location!
+		#
+		#  On most systems, this should support file globbing
+		#  e.g. "${radacctdir}/detail-*:*"
+		#  This lets you write many smaller detail files as in
+		#  the example in radiusd.conf: ".../detail-%Y%m%d:%H"
+		#  Writing many small files is often better than writing
+		#  one large file.  File globbing also means that with
+		#  a common naming scheme for detail files, you can
+		#  have many detail file writers, and only one reader.
+		filename = ${radacctdir}/detail
+
+		#
+		#  The server can read accounting packets from the
+		#  detail file much more quickly than those packets
+		#  can be written to a database.  If the database is
+		#  overloaded, then bad things can happen.
+		#
+		#  The server will keep track of how long it takes to
+		#  process an entry from the detail file.  It will
+		#  then pause between handling entries.  This pause
+		#  allows databases to "catch up", and gives the
+		#  server time to notice that other packets may have
+		#  arrived.
+		#
+		#  The pause is calculated dynamically, to ensure that
+		#  the load due to reading the detail files is limited
+		#  to a small percentage of CPU time.  The
+		#  "load_factor" configuration item is a number
+		#  between 1 and 100.  The server will try to keep the
+		#  percentage of time taken by "detail" file entries
+		#  to "load_factor" percentage of the CPU time.
+		#
+		#  If the "load_factor" is set to 100, then the server
+		#  will read packets as fast as it can, usually
+		#  causing databases to go into overload.
+		#
+		load_factor = 10
+	}
+
+	#
+	#  Pre-accounting.  Decide which accounting type to use.
+	#
+	preacct {
+		preprocess
+
+		# Since we're just proxying, we don't need acct_unique.
+
+		#
+		#  Look for IPASS-style 'realm/', and if not found, look for
+		#  '@realm', and decide whether or not to proxy, based on
+		#  that.
+		#
+		#  Accounting requests are generally proxied to the same
+		#  home server as authentication requests.
+	#	IPASS
+		suffix
+	#	ntdomain
+
+		#
+		#  Read the 'acct_users' file.  This isn't always
+		#  necessary, and can be deleted if you do not use it.
+		files
+	}
+
+	#
+	#  Accounting.  Log the accounting data.
+	#
+	accounting {
+		   #
+		   # Since we're proxying, we don't log anything
+		   # locally.  Ensure that the accounting section
+		   # "succeeds" by forcing an "ok" return.
+		   ok
+	}
+
+
+	#
+	#  When the server decides to proxy a request to a home server,
+	#  the proxied request is first passed through the pre-proxy
+	#  stage.  This stage can re-write the request, or decide to
+	#  cancel the proxy.
+	#
+	#  Only a few modules currently have this method.
+	#
+	pre-proxy {
+
+		#  If you want to have a log of packets proxied to a home
+		#  server, un-comment the following line, and the
+		#  'detail pre_proxy_log' section in radiusd.conf.
+	#	pre_proxy_log
+	}
+
+	#
+	#  When the server receives a reply to a request it proxied
+	#  to a home server, the request may be massaged here, in the
+	#  post-proxy stage.
+	#
+	post-proxy {
+		#
+
+		#  If you want to have a log of replies from a home
+		#  server, un-comment the following line, and the
+		#  'detail post_proxy_log' section in radiusd.conf.
+	#	post_proxy_log
+
+
+		#  Uncomment the following line if you want to filter
+		#  replies from remote proxies based on the rules
+		#  defined in the 'attrs' file.
+
+	#	attr_filter
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/decoupled-accounting b/src/test/setup/radius-config/freeradius/sites-available/decoupled-accounting
new file mode 100644
index 0000000..199258d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/decoupled-accounting
@@ -0,0 +1,140 @@
+# -*- text -*-
+######################################################################
+#
+#	This is a sample configuration for "decoupled" accounting.
+#	"Decoupled" accounting is where the accounting packets are
+#	NOT written "live" to the back-end database.  This method
+#	can only be used if you are not interested in "live"
+#	accounting.  i.e. Where you can tolerate delays that may be
+#	a few seconds, before accounting packets get written to
+#	the DB.
+#
+#	Oddly enough, this method can speed up the processing of
+#	accounting packets, as all database activity is serialized.
+#
+#	This file is NOT meant to be used as-is.  It needs to be
+#	edited to match your local configuration.
+#
+#	$Id$
+#
+######################################################################
+
+#  Define a virtual server to write the accounting packets.
+#  Any "listen" section that listens on an accounting port should
+#  set "virtual_server = write-detail.example.com
+server write_detail.example.com {
+	accounting {
+		#
+		#  Write the "detail" files.
+		#
+		#  See raddb/modules/detail.example.com for more info.
+		detail.example.com
+	}
+
+	# That's it!
+}
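+
+#  For example (illustrative only), a "listen" section such as the
+#  following would direct accounting packets into the virtual server
+#  above:
+#
+#	listen {
+#		type = acct
+#		ipaddr = *
+#		port = 0
+#		virtual_server = write_detail.example.com
+#	}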
+
+#  Define a virtual server to process the accounting packets.
+server read-detail.example.com {
+	#  Read accounting packets from the detail file(s) for
+	#  the home server.
+	listen {
+		type = detail
+		filename = "${radacctdir}/detail.example.com/detail-*:*"
+		load_factor = 10
+	}
+
+	#  All packets read from the detail file are processed through
+	#  the preacct && accounting sections.
+	#
+	#  The following text is copied verbatim from sites-available/default.
+	#  You should edit it for your own local configuration.
+
+#
+#  Pre-accounting.  Decide which accounting type to use.
+#
+preacct {
+	preprocess
+
+	#
+	#  Ensure that we have a semi-unique identifier for every
+	#  request, as many NAS boxes are broken.
+	acct_unique
+
+	#
+	#  Look for IPASS-style 'realm/', and if not found, look for
+	#  '@realm', and decide whether or not to proxy, based on
+	#  that.
+	#
+	#  Accounting requests are generally proxied to the same
+	#  home server as authentication requests.
+#	IPASS
+	suffix
+#	ntdomain
+
+	#
+	#  Read the 'acct_users' file
+	files
+}
+
+#
+#  Accounting.  Log the accounting data.
+#
+accounting {
+	#
+	#  Create a 'detail'ed log of the packets.
+	#  Note that accounting requests which are proxied
+	#  are also logged in the detail file.
+	detail
+#	daily
+
+	#  Update the wtmp file
+	#
+	#  If you don't use "radlast", you can delete this line.
+	unix
+
+	#
+	#  For Simultaneous-Use tracking.
+	#
+	#  Due to packet losses in the network, the data here
+	#  may be incorrect.  There is little we can do about it.
+	radutmp
+#	sradutmp
+
+	#  Return an address to the IP Pool when we see a stop record.
+#	main_pool
+
+	#
+	#  Log traffic to an SQL database.
+	#
+	#  NOTE! You will have to ensure that any accounting packets
+	#  NOT handled by the SQL module (e.g. "stop with zero session length")
+	#  result in the accounting section still returning "ok".
+	#
+	#  Otherwise, the server will think that the accounting packet
+	#  was NOT handled properly, and will keep trying to process it
+	#  through this virtual server!
+	#
+	#  See "Accounting queries" in sql.conf
+#	sql
+
+	#
+	#  Instead of sending the query to the SQL server,
+	#  write it into a log file.
+	#
+#	sql_log
+
+	#  Cisco VoIP specific bulk accounting
+#	pgsql-voip
+
+	#  Filter attributes from the accounting response.
+	attr_filter.accounting_response
+
+	#
+	#  See "Autz-Type Status-Server" for how this works.
+	#
+#	Acct-Type Status-Server {
+#
+#	}
+}
+}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/default b/src/test/setup/radius-config/freeradius/sites-available/default
new file mode 100644
index 0000000..934f835
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/default
@@ -0,0 +1,844 @@
+######################################################################
+#
+#	As of 2.0.0, FreeRADIUS supports virtual hosts using the
+#	"server" section, and configuration directives.
+#
+#	Virtual hosts should be put into the "sites-available"
+#	directory.  Soft links should be created in the "sites-enabled"
+#	directory to these files.  This is done in a normal installation.
+#
+#	If you are using 802.1X (EAP) authentication, please see also
+#	the "inner-tunnel" virtual server.  You will likely have to edit
+#	that, too, for authentication to work.
+#
+#	$Id: 3278975e054fab504afda5ba8fc999239cb2fb9d $
+#
+######################################################################
+#
+#	Read "man radiusd" before editing this file.  See the section
+#	titled DEBUGGING.  It outlines a method where you can quickly
+#	obtain the configuration you want, without running into
+#	trouble.  See also "man unlang", which documents the format
+#	of this file.
+#
+#	This configuration is designed to work in the widest possible
+#	set of circumstances, with the widest possible number of
+#	authentication methods.  This means that in general, you should
+#	need to make very few changes to this file.
+#
+#	The best way to configure the server for your local system
+#	is to CAREFULLY edit this file.  Most attempts to make large
+#	edits to this file will BREAK THE SERVER.  Any edits should
+#	be small, and tested by running the server with "radiusd -X".
+#	Once the edits have been verified to work, save a copy of these
+#	configuration files somewhere.  (e.g. as a "tar" file).  Then,
+#	make more edits, and test, as above.
+#
+#	There are many "commented out" references to modules such
+#	as ldap, sql, etc.  These references serve as place-holders.
+#	If you need the functionality of that module, then configure
+#	it in radiusd.conf, and un-comment the references to it in
+#	this file.  In most cases, those small changes will result
+#	in the server being able to connect to the DB, and to
+#	authenticate users.
+#
+######################################################################
+
+server default {
+#
+#  If you want the server to listen on additional addresses, or on
+#  additional ports, you can use multiple "listen" sections.
+#
+#  Each section makes the server listen for only one type of packet;
+#  therefore authentication and accounting have to be configured in
+#  different sections.
+#
+#  The server ignores all "listen" sections if you are using '-i' and '-p'
+#  on the command line.
+#
+listen {
+	#  Type of packets to listen for.
+	#  Allowed values are:
+	#	auth	listen for authentication packets
+	#	acct	listen for accounting packets
+	#	proxy   IP to use for sending proxied packets
+	#	detail  Read from the detail file.  For examples, see
+	#               raddb/sites-available/copy-acct-to-home-server
+	#	status  listen for Status-Server packets.  For examples,
+	#		see raddb/sites-available/status
+	#	coa     listen for CoA-Request and Disconnect-Request
+	#		packets.  For examples, see the file
+	#		raddb/sites-available/coa-server
+	#
+	type = auth
+
+	#  Note: "type = proxy" lets you control the source IP used for
+	#        proxying packets, with some limitations:
+	#
+	#    * A proxy listener CANNOT be used in a virtual server section.
+	#    * You should probably set "port = 0".
+	#    * Any "clients" configuration will be ignored.
+	#
+	#  See also proxy.conf, and the "src_ipaddr" configuration entry
+	#  in the sample "home_server" section.  When you specify the
+	#  source IP address for packets sent to a home server, the
+	#  proxy listeners are automatically created.
+
+	#  IP address on which to listen.
+	#  Allowed values are:
+	#	dotted quad (1.2.3.4)
+	#       hostname    (radius.example.com)
+	#       wildcard    (*)
+	ipaddr = *
+
+	#  OR, you can use an IPv6 address, but not both
+	#  at the same time.
+#	ipv6addr = ::	# any.  ::1 == localhost
+
+	#  Port on which to listen.
+	#  Allowed values are:
+	#	integer port number (1812)
+	#	0 means "use /etc/services for the proper port"
+	port = 0
+
+	#  Some systems support binding to an interface, in addition
+	#  to the IP address.  This feature isn't strictly necessary,
+	#  but for sites with many IP addresses on one interface,
+	#  it's useful to say "listen on all addresses for eth0".
+	#
+	#  If your system does not support this feature, you will
+	#  get an error if you try to use it.
+	#
+#	interface = eth0
+
+	#  Per-socket lists of clients.  This is a very useful feature.
+	#
+	#  The name here is a reference to a section elsewhere in
+	#  radiusd.conf, or clients.conf.  Having the name as
+	#  a reference allows multiple sockets to use the same
+	#  set of clients.
+	#
+	#  If this configuration is used, then the global list of clients
+	#  is IGNORED for this "listen" section.  Take care configuring
+	#  this feature, to ensure you don't accidentally disable a
+	#  client you need.
+	#
+	#  See clients.conf for the configuration of "per_socket_clients".
+	#
+#	clients = per_socket_clients
+
+	#
+	#  Connection limiting for sockets with "proto = tcp".
+	#
+	#  This section is ignored for other kinds of sockets.
+	#
+	limit {
+	      #
+	      #  Limit the number of simultaneous TCP connections to the socket
+	      #
+	      #  The default is 16.
+	      #  Setting this to 0 means "no limit"
+	      max_connections = 16
+
+	      #  The per-socket "max_requests" option does not exist.
+
+	      #
+	      #  The lifetime, in seconds, of a TCP connection.  After
+	      #  this lifetime, the connection will be closed.
+	      #
+	      #  Setting this to 0 means "forever".
+	      lifetime = 0
+
+	      #
+	      #  The idle timeout, in seconds, of a TCP connection.
+	      #  If no packets have been received over the connection for
+	      #  this time, the connection will be closed.
+	      #
+	      #  Setting this to 0 means "no timeout".
+	      #
+	      #  We STRONGLY RECOMMEND that you set an idle timeout.
+	      #
+	      idle_timeout = 30
+	}
+}
+
+#
+#  This second "listen" section is for listening on the accounting
+#  port, too.
+#
+listen {
+	ipaddr = *
+#	ipv6addr = ::
+	port = 0
+	type = acct
+#	interface = eth0
+#	clients = per_socket_clients
+
+	limit {
+		#  The number of packets received can be rate limited via the
+		#  "max_pps" configuration item.  When it is set, the server
+		#  tracks the total number of packets received in the previous
+		#  second.  If the count is greater than "max_pps", then the
+		#  new packet is silently discarded.  This helps the server
+		#  deal with overload situations.
+		#
+		#  The packets/s counter is tracked in a sliding window.  This
+		#  means that the pps calculation is done for the second
+		#  before the current packet was received.  NOT for the current
+		#  wall-clock second, and NOT for the previous wall-clock second.
+		#
+		#  Useful values are 0 (no limit), or 100 to 10000.
+		#  Values lower than 100 will likely cause the server to ignore
+		#  normal traffic.  Few systems are capable of handling more than
+		#  10K packets/s.
+		#
+		#  It is most useful for accounting systems.  Set it to 50%
+		#  more than the normal accounting load, and you can be sure that
+		#  the server will never get overloaded
+		#
+#		max_pps = 0
+
+		# Only for "proto = tcp". These are ignored for "udp" sockets.
+		#
+#		idle_timeout = 0
+#		lifetime = 0
+#		max_connections = 0
+	}
+}
+
+#  Authorization. First preprocess (hints and huntgroups files),
+#  then realms, and finally look in the "users" file.
+#
+#  Any changes made here should also be made to the "inner-tunnel"
+#  virtual server.
+#
+#  The order of the realm modules will determine the order that
+#  we try to find a matching realm.
+#
+#  Make *sure* that 'preprocess' comes before any realm if you
+#  need to setup hints for the remote radius server
+authorize {
+	#
+	#  Take a User-Name, and perform some checks on it, for spaces and other
+	#  invalid characters.  If the User-Name appears invalid, reject the
+	#  request.
+	#
+	#  See policy.d/filter for the definition of the filter_username policy.
+	#
+	filter_username
+
+	#
+	#  The preprocess module takes care of sanitizing some bizarre
+	#  attributes in the request, and turning them into attributes
+	#  which are more standard.
+	#
+	#  It takes care of processing the 'raddb/hints' and the
+	#  'raddb/huntgroups' files.
+	preprocess
+
+	#  If you intend to use CUI and you require that the Operator-Name
+	#  be set for CUI generation and you want to generate CUI also
+	#  for your local clients then uncomment the operator-name
+	#  below and set the operator-name for your clients in clients.conf
+#       operator-name
+
+	#
+	#  If you want to generate CUI for some clients that do not
+	#  send proper CUI requests, then uncomment the
+	#  cui below and set "add_cui = yes" for these clients in clients.conf
+#       cui
+
+	#
+	#  If you want to have a log of authentication requests,
+	#  un-comment the following line, and the 'detail auth_log'
+	#  section, above.
+#	auth_log
+
+	#
+	#  The chap module will set 'Auth-Type := CHAP' if we are
+	#  handling a CHAP request and Auth-Type has not already been set
+	#chap
+
+	#
+	#  If the users are logging in with an MS-CHAP-Challenge
+	#  attribute for authentication, the mschap module will find
+	#  the MS-CHAP-Challenge attribute, and add 'Auth-Type := MS-CHAP'
+	#  to the request, which will cause the server to then use
+	#  the mschap module for authentication.
+	#mschap
+
+	#
+	#  If you have a Cisco SIP server authenticating against
+	#  FreeRADIUS, uncomment the following line, and the 'digest'
+	#  line in the 'authenticate' section.
+	#digest
+
+	#
+	#  The WiMAX specification says that the Calling-Station-Id
+	#  is 6 octets of the MAC.  This definition conflicts with
+	#  RFC 3580, and all common RADIUS practices.  Un-commenting
+	#  the "wimax" module here means that it will fix the
+	#  Calling-Station-Id attribute to the normal format as
+	#  specified in RFC 3580 Section 3.21
+#	wimax
+
+	#
+	#  Look for IPASS style 'realm/', and if not found, look for
+	#  '@realm', and decide whether or not to proxy, based on
+	#  that.
+#	IPASS
+
+	#
+	#  If you are using multiple kinds of realms, you probably
+	#  want to set "ignore_null = yes" for all of them.
+	#  Otherwise, when the first style of realm doesn't match,
+	#  the other styles won't be checked.
+	#
+	suffix
+#	ntdomain
+
+	#
+	#  This module takes care of EAP-MD5, EAP-TLS, and EAP-LEAP
+	#  authentication.
+	#
+	#  It also sets the EAP-Type attribute in the request
+	#  attribute list to the EAP type from the packet.
+	#
+	#  As of 2.0, the EAP module returns "ok" in the authorize stage
+	#  for TTLS and PEAP.  In 1.x, it never returned "ok" here, so
+	#  this change is compatible with older configurations.
+	#
+	#  The example below uses module failover to avoid querying all
+	#  of the following modules if the EAP module returns "ok".
+	#  Therefore, your LDAP and/or SQL servers will not be queried
+	#  for the many packets that go back and forth to set up TTLS
+	#  or PEAP.  The load on those servers will therefore be reduced.
+	#
+	eap {
+		ok = return
+	}
+
+	#
+	#  Pull crypt'd passwords from /etc/passwd or /etc/shadow,
+	#  using the system API's to get the password.  If you want
+	#  to read /etc/passwd or /etc/shadow directly, see the
+	#  passwd module in radiusd.conf.
+	#
+#	unix
+
+	#
+	#  Read the 'users' file
+	files
+
+	#
+	#  Look in an SQL database.  The schema of the database
+	#  is meant to mirror the "users" file.
+	#
+	#  See "Authorization Queries" in sql.conf
+	-sql
+
+	#
+	#  If you are using /etc/smbpasswd, and are also doing
+	#  mschap authentication, then un-comment this line, and
+	#  configure the 'smbpasswd' module.
+#	smbpasswd
+
+	#
+	#  The ldap module reads passwords from the LDAP database.
+	-ldap
+
+	#
+	#  Enforce daily limits on time spent logged in.
+#	daily
+
+	#
+	expiration
+	logintime
+
+	#
+	#  If no other module has claimed responsibility for
+	#  authentication, then try to use PAP.  This allows the
+	#  other modules listed above to add a "known good" password
+	#  to the request, and to do nothing else.  The PAP module
+	#  will then see that password, and use it to do PAP
+	#  authentication.
+	#
+	#  This module should be listed last, so that the other modules
+	#  get a chance to set Auth-Type for themselves.
+	#
+	pap
+
+	#
+	#  If "status_server = yes", then Status-Server messages are passed
+	#  through the following section, and ONLY the following section.
+	#  This permits you to do DB queries, for example.  If the modules
+	#  listed here return "fail", then NO response is sent.
+	#
+#	Autz-Type Status-Server {
+#
+#	}
+}
+
+
+#  Authentication.
+#
+#
+#  This section lists which modules are available for authentication.
+#  Note that it does NOT mean 'try each module in order'.  It means
+#  that a module from the 'authorize' section adds a configuration
+#  attribute 'Auth-Type := FOO'.  That authentication type is then
+#  used to pick the appropriate module from the list below.
+#
+
+#  In general, you SHOULD NOT set the Auth-Type attribute.  The server
+#  will figure it out on its own, and will do the right thing.  The
+#  most common side effect of erroneously setting the Auth-Type
+#  attribute is that one authentication method will work, but the
+#  others will not.
+#
+#  The common reasons to set the Auth-Type attribute by hand
+#  are to either forcibly reject the user (Auth-Type := Reject),
+#  or to forcibly accept the user (Auth-Type := Accept).
+#
+#  Note that Auth-Type := Accept will NOT work with EAP.
+#
+#  Please do not put "unlang" configurations into the "authenticate"
+#  section.  Put them in the "post-auth" section instead.  That's what
+#  the post-auth section is for.
+#
+authenticate {
+	#
+	#  PAP authentication, when a back-end database listed
+	#  in the 'authorize' section supplies a password.  The
+	#  password can be clear-text, or encrypted.
+	Auth-Type PAP {
+		pap
+	}
+
+	#
+	#  Most people want CHAP authentication
+	#  A back-end database listed in the 'authorize' section
+	#  MUST supply a CLEAR TEXT password.  Encrypted passwords
+	#  won't work.
+	#Auth-Type CHAP {
+#
+#		chap
+#	}
+
+	#
+	#  MSCHAP authentication.
+#	Auth-Type MS-CHAP {
+#		mschap
+#	}
+
+	#
+	#  If you have a Cisco SIP server authenticating against
+	#  FreeRADIUS, uncomment the following line, and the 'digest'
+	#  line in the 'authorize' section.
+#	digest
+
+	#
+	#  Pluggable Authentication Modules.
+#	pam
+
+	#  Uncomment it if you want to use ldap for authentication
+	#
+	#  Note that this means "check plain-text password against
+	#  the ldap database", which means that EAP won't work,
+	#  as it does not supply a plain-text password.
+	#
+	#  We do NOT recommend using this.  LDAP servers are databases.
+	#  They are NOT authentication servers.  FreeRADIUS is an
+	#  authentication server, and knows what to do with authentication.
+	#  LDAP servers do not.
+	#
+#	Auth-Type LDAP {
+#		ldap
+#	}
+
+	#
+	#  Allow EAP authentication.
+	eap
+
+	#
+	#  The older configurations sent a number of attributes in
+	#  Access-Challenge packets, which wasn't strictly correct.
+	#  If you want to filter out these attributes, uncomment
+	#  the following lines.
+	#
+#	Auth-Type eap {
+#		eap {
+#			handled = 1
+#		}
+#		if (handled && (Response-Packet-Type == Access-Challenge)) {
+#			attr_filter.access_challenge.post-auth
+#			handled  # override the "updated" code from attr_filter
+#		}
+#	}
+}
+
+
+#
+#  Pre-accounting.  Decide which accounting type to use.
+#
+preacct {
+	preprocess
+
+	#
+	#  Merge Acct-[Input|Output]-Gigawords and Acct-[Input|Output]-Octets
+	#  into a single 64bit counter Acct-[Input|Output]-Octets64.
+	#
+#	acct_counters64
+
+	#
+	#  Session start times are *implied* in RADIUS.
+	#  The NAS never sends a "start time".  Instead, it sends
+	#  a start packet, *possibly* with an Acct-Delay-Time.
+	#  The server is supposed to conclude that the start time
+	#  was "Acct-Delay-Time" seconds in the past.
+	#
+	#  The code below creates an explicit start time, which can
+	#  then be used in other modules.  It will be *mostly* correct.
+	#  Any errors are due to the 1-second resolution of RADIUS,
+	#  and the possibility that the time on the NAS may be off.
+	#
+	#  The start time is: NOW - delay - session_length
+	#
+
+#	update request {
+#	  	FreeRADIUS-Acct-Session-Start-Time = "%{expr: %l - %{%{Acct-Session-Time}:-0} - %{%{Acct-Delay-Time}:-0}}"
+#	}
+
+
+	#
+	#  Ensure that we have a semi-unique identifier for every
+	#  request, as many NAS boxes are broken.
+	acct_unique
+
+	#
+	#  Look for IPASS-style 'realm/', and if not found, look for
+	#  '@realm', and decide whether or not to proxy, based on
+	#  that.
+	#
+	#  Accounting requests are generally proxied to the same
+	#  home server as authentication requests.
+#	IPASS
+	suffix
+#	ntdomain
+
+	#
+	#  Read the 'acct_users' file
+	files
+}
+
+#
+#  Accounting.  Log the accounting data.
+#
+accounting {
+	#  Update accounting packet by adding the CUI attribute
+	#  recorded from the corresponding Access-Accept
+	#  use it only if your NAS boxes do not support CUI themselves
+#       cui
+	#
+	#  Create a 'detail'ed log of the packets.
+	#  Note that accounting requests which are proxied
+	#  are also logged in the detail file.
+	detail
+#	daily
+
+	#  Update the wtmp file
+	#
+	#  If you don't use "radlast", you can delete this line.
+	unix
+
+	#
+	#  For Simultaneous-Use tracking.
+	#
+	#  Due to packet losses in the network, the data here
+	#  may be incorrect.  There is little we can do about it.
+#	radutmp
+#	sradutmp
+
+	#  Return an address to the IP Pool when we see a stop record.
+#	main_pool
+
+	#
+	#  Log traffic to an SQL database.
+	#
+	#  See "Accounting queries" in sql.conf
+	-sql
+
+	#
+	#  If you receive stop packets with zero session length,
+	#  they will NOT be logged in the database.  The SQL module
+	#  will print a message (only in debugging mode), and will
+	#  return "noop".
+	#
+	#  You can ignore these packets by uncommenting the following
+	#  three lines.  Otherwise, the server will not respond to the
+	#  accounting request, and the NAS will retransmit.
+	#
+#	if (noop) {
+#		ok
+#	}
+
+	#
+	#  Instead of sending the query to the SQL server,
+	#  write it into a log file.
+	#
+#	sql_log
+
+	#  Cisco VoIP specific bulk accounting
+#	pgsql-voip
+
+	# For Exec-Program and Exec-Program-Wait
+	exec
+
+	#  Filter attributes from the accounting response.
+	attr_filter.accounting_response
+
+	#
+	#  See "Autz-Type Status-Server" for how this works.
+	#
+#	Acct-Type Status-Server {
+#
+#	}
+}
+
+
+#  Session database, used for checking Simultaneous-Use. Either the radutmp
+#  or rlm_sql module can handle this.
+#  The rlm_sql module is *much* faster
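+#
+#  Simultaneous-Use itself is enforced per user.  As a sketch (not part
+#  of the original file), a check item such as the following would also
+#  be needed in the "users" file for the limit to apply:
+#
+#	bob	Simultaneous-Use := 1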
+session {
+#	radutmp
+
+	#
+	#  See "Simultaneous Use Checking Queries" in sql.conf
+#	sql
+}
+
+
+#  Post-Authentication
+#  Once we KNOW that the user has been authenticated, there are
+#  additional steps we can take.
+post-auth {
+	#  Get an address from the IP Pool.
+#	main_pool
+
+
+	#  Create the CUI value and add the attribute to Access-Accept.
+	#  Uncomment the line below if *returning* the CUI.
+#       cui
+
+	#
+	#  If you want to have a log of authentication replies,
+	#  un-comment the following line, and enable the
+	#  'detail reply_log' module.
+#	reply_log
+
+	#
+	#  After authenticating the user, do another SQL query.
+	#
+	#  See "Authentication Logging Queries" in sql.conf
+	-sql
+
+	#
+	#  Instead of sending the query to the SQL server,
+	#  write it into a log file.
+	#
+#	sql_log
+
+	#
+	#  Un-comment the following if you want to modify the user's object
+	#  in LDAP after a successful login.
+	#
+#	ldap
+
+	# For Exec-Program and Exec-Program-Wait
+	exec
+
+	#
+	#  Calculate the various WiMAX keys.  In order for this to work,
+	#  you will need to define the WiMAX NAI, usually via
+	#
+	#	update request {
+	#	       WiMAX-MN-NAI = "%{User-Name}"
+	#	}
+	#
+	#  If you want various keys to be calculated, you will need to
+	#  update the reply with "template" values.  The module will see
+	#  this, and replace the template values with the correct ones
+	#  taken from the cryptographic calculations.  e.g.
+	#
+	# 	update reply {
+	#		WiMAX-FA-RK-Key = 0x00
+	#		WiMAX-MSK = "%{EAP-MSK}"
+	#	}
+	#
+	#  You may want to delete the MS-MPPE-*-Keys from the reply,
+	#  as some WiMAX clients behave badly when those attributes
+	#  are included.  See "raddb/modules/wimax", configuration
+	#  entry "delete_mppe_keys" for more information.
+	#
+#	wimax
+
+
+	#  If there is a client certificate (EAP-TLS, sometimes PEAP
+	#  and TTLS), then some attributes are filled out after the
+	#  certificate verification has been performed.  These fields
+	#  MAY be available during the authentication, or they may be
+	#  available only in the "post-auth" section.
+	#
+	#  The first set of attributes contains information about the
+	#  issuing certificate which is being used.  The second
+	#  contains information about the client certificate (if
+	#  available).
+#
+#	update reply {
+#	       Reply-Message += "%{TLS-Cert-Serial}"
+#	       Reply-Message += "%{TLS-Cert-Expiration}"
+#	       Reply-Message += "%{TLS-Cert-Subject}"
+#	       Reply-Message += "%{TLS-Cert-Issuer}"
+#	       Reply-Message += "%{TLS-Cert-Common-Name}"
+#	       Reply-Message += "%{TLS-Cert-Subject-Alt-Name-Email}"
+#
+#	       Reply-Message += "%{TLS-Client-Cert-Serial}"
+#	       Reply-Message += "%{TLS-Client-Cert-Expiration}"
+#	       Reply-Message += "%{TLS-Client-Cert-Subject}"
+#	       Reply-Message += "%{TLS-Client-Cert-Issuer}"
+#	       Reply-Message += "%{TLS-Client-Cert-Common-Name}"
+#	       Reply-Message += "%{TLS-Client-Cert-Subject-Alt-Name-Email}"
+#	}
+
+	#  Insert a Class attribute (with a unique value) into the
+	#  response.  This aids matching auth and acct records, and
+	#  protects against duplicate Acct-Session-Id.  Note: this only
+	#  works if the NAS has implemented RFC 2865 behaviour for the
+	#  Class attribute, AND if the NAS supports long Class attributes.
+	#  Many older or cheap NASes only support 16-octet Class attributes.
+#	insert_acct_class
+
+	#  MACsec requires the use of EAP-Key-Name.  However, we don't
+	#  want to send it for all EAP sessions.  Therefore, the EAP
+	#  modules put required data into the EAP-Session-Id attribute.
+	#  This attribute is never put into a request or reply packet.
+	#
+	#  Uncomment the next few lines to copy the required data into
+	#  the EAP-Key-Name attribute
+#	if (reply:EAP-Session-Id) {
+#		update reply {
+#			EAP-Key-Name := "%{reply:EAP-Session-Id}"
+#		}
+#	}
+
+	#  Remove reply message if the response contains an EAP-Message
+	remove_reply_message_if_eap
+
+	#
+	#  Access-Reject packets are sent through the REJECT sub-section of the
+	#  post-auth section.
+	#
+	#  Add the ldap module name (or instance) if you have set
+	#  'edir_account_policy_check = yes' in the ldap module configuration
+	#
+	Post-Auth-Type REJECT {
+		# log failed authentications in SQL, too.
+		-sql
+		attr_filter.access_reject
+
+		# Insert EAP-Failure message if the request was
+		# rejected by policy instead of because of an
+		# authentication failure
+		eap
+
+		#  Remove reply message if the response contains an EAP-Message
+		remove_reply_message_if_eap
+	}
+}
+
+#
+#  When the server decides to proxy a request to a home server,
+#  the proxied request is first passed through the pre-proxy
+#  stage.  This stage can re-write the request, or decide to
+#  cancel the proxy.
+#
+#  Only a few modules currently have this method.
+#
+pre-proxy {
+	# Before proxying the request, add an Operator-Name attribute
+	# identifying the operator, if one is found for this client.
+	# No need to uncomment this if you have already enabled this in
+	# the authorize section.
+#       operator-name
+
+	#  The client requests the CUI by sending a CUI attribute
+	#  containing one zero byte.
+	#  Uncomment the line below if *requesting* the CUI.
+#       cui
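+
+	#  Without the "cui" policy, an equivalent request could be made
+	#  by hand (sketch only, not part of the original file):
+	#
+	#	update proxy-request {
+	#		Chargeable-User-Identity := 0x00
+	#	}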
+
+	#  Uncomment the following line if you want to change attributes
+	#  as defined in the preproxy_users file.
+#	files
+
+	#  Uncomment the following line if you want to filter requests
+	#  sent to remote servers based on the rules defined in the
+	#  'attrs.pre-proxy' file.
+#	attr_filter.pre-proxy
+
+	#  If you want to have a log of packets proxied to a home
+	#  server, un-comment the following line, and the
+	#  'detail pre_proxy_log' section, above.
+#	pre_proxy_log
+}
+
+#
+#  When the server receives a reply to a request it proxied
+#  to a home server, the request may be massaged here, in the
+#  post-proxy stage.
+#
+post-proxy {
+
+	#  If you want to have a log of replies from a home server,
+	#  un-comment the following line, and the 'detail post_proxy_log'
+	#  section, above.
+#	post_proxy_log
+
+	#  Uncomment the following line if you want to filter replies from
+	#  remote proxies based on the rules defined in the 'attrs' file.
+#	attr_filter.post-proxy
+
+	#
+	#  If you are proxying LEAP, you MUST configure the EAP
+	#  module, and you MUST list it here, in the post-proxy
+	#  stage.
+	#
+	#  You MUST also use the 'nostrip' option in the 'realm'
+	#  configuration.  Otherwise, the User-Name attribute
+	#  in the proxied request will not match the user name
+	#  hidden inside of the EAP packet, and the end server will
+	#  reject the EAP request.
+	#
+	eap
+
+	#
+	#  If the server tries to proxy a request and fails, then the
+	#  request is processed through the modules in this section.
+	#
+	#  The main use of this section is to permit robust proxying
+	#  of accounting packets.  The server can be configured to
+	#  proxy accounting packets as part of normal processing.
+	#  Then, if the home server goes down, accounting packets can
+	#  be logged to a local "detail" file, for processing with
+	#  radrelay.  When the home server comes back up, radrelay
+	#  will read the detail file, and send the packets to the
+	#  home server.
+	#
+	#  With this configuration, the server always responds to
+	#  Accounting-Requests from the NAS, but only writes
+	#  accounting packets to disk if the home server is down.
+	#
+#	Post-Proxy-Type Fail {
+#			detail
+#	}
+}
+}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/dhcp b/src/test/setup/radius-config/freeradius/sites-available/dhcp
new file mode 100644
index 0000000..42760ef
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/dhcp
@@ -0,0 +1,279 @@
+# -*- text -*-
+######################################################################
+#
+#	This is a virtual server that handles DHCP.
+#
+#	$Id: 170e2b191af7184b519d3594fa99476c857dfda5 $
+#
+######################################################################
+
+#
+#  The DHCP functionality goes into a virtual server.
+#
+server dhcp {
+
+#  Define a DHCP socket.
+#
+#  The default port below is 6700, so you don't break your network.
+#  If you want it to do real DHCP, change this to 67, and good luck!
+#
+#  You can also bind the DHCP socket to an interface.
+#  See below, and raddb/radiusd.conf for examples.
+#
+#  This lets you run *one* DHCP server instance and have it listen on
+#  multiple interfaces, each with a separate policy.
+#
+#  If you have multiple interfaces, it is a good idea to bind the
+#  listen section to an interface.  You will also need one listen
+#  section per interface.
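+#
+#  For example, a second listen section bound to another interface
+#  might look like this (sketch only; interface name and address are
+#  illustrative):
+#
+#	listen {
+#		type = dhcp
+#		ipaddr = 192.0.2.1
+#		port = 6700
+#		interface = eth2
+#	}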
+#
+#  FreeBSD does *not* support binding sockets to interfaces.  Therefore,
+#  if you have multiple interfaces, broadcasts may go out of the wrong
+#  one, or even all interfaces.  The solution is to use the "setfib" command.
+#  If you have a network "10.10.0/24" on LAN1, you will need to do:
+#
+#  Pick any IP on the 10.10.0/24 network
+#	$ setfib 1 route add default 10.10.0.1
+#
+#  Edit /etc/rc.local, and add a line:
+#	setfib 1 /path/to/radiusd
+#
+#  The kernel must be built with the following options:
+#	options    ROUTETABLES=2
+#  or any value larger than 2.
+#
+# The only other solution is to update FreeRADIUS to use BPF sockets.
+#
+listen {
+	#  This is a dhcp socket.
+	type = dhcp
+
+	#  IP address to listen on. Will usually be the IP of the
+	#  interface, or 0.0.0.0
+	ipaddr = 127.0.0.1
+
+	#  source IP address for unicast packets sent by the
+	#  DHCP server.
+	#
+	#  The source IP for unicast packets is chosen from the first
+	#  one of the following items which returns a valid IP
+	#  address:
+	#
+	#	src_ipaddr
+	#	ipaddr
+	#	reply:DHCP-Server-IP-Address
+	#	reply:DHCP-DHCP-Server-Identifier
+	#
+	src_ipaddr = 127.0.0.1
+
+	#  The standard DHCP port is 67, which is what a production
+	#  network would use.  Don't set it to 67 here unless you really
+	#  know what you're doing: even if nothing is configured below,
+	#  the server may still NAK legitimate requests from clients.
+	port = 6700
+
+	#  Interface name we are listening on. See comments above.
+#	interface = lo0
+
+	# The DHCP server defaults to allowing broadcast packets.
+	# Set this to "no" only when the server receives *all* packets
+	# from a relay agent.  i.e. when *no* clients are on the same
+	# LAN as the DHCP server.
+	#
+	# It's set to "no" here for testing. It will usually want to
+	# be "yes" in production, unless you are only dealing with
+	# relayed packets.
+	broadcast = no
+
+	# On Linux if you're running the server as non-root, you
+	# will need to do:
+	#
+	#	sudo setcap cap_net_admin=ei /path/to/radiusd
+	#
+	# This will allow the server to set ARP table entries
+	# for newly allocated IPs
+}
+
+#  Packets received on the socket will be processed through one
+#  of the following sections, named after the DHCP packet type.
+#  See dictionary.dhcp for the packet types.
+
+#  Return packets will be sent to, in preference order:
+#     DHCP-Gateway-IP-Address
+#     DHCP-Client-IP-Address
+#     DHCP-Your-IP-Address
+#  At least one of these attributes should be set at the end of each
+#  section for a response to be sent.
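+#
+#  For example, a statically assigned address could be offered with
+#  a policy like this (sketch only; the address is illustrative):
+#
+#	update reply {
+#		DHCP-Your-IP-Address = 192.0.2.50
+#	}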
+
+dhcp DHCP-Discover {
+
+	#  Set the type of packet to send in reply.
+	#
+	#  The server will look at the DHCP-Message-Type attribute to
+	#  determine which type of packet to send in reply. Common
+	#  values would be DHCP-Offer, DHCP-Ack or DHCP-NAK. See
+	#  dictionary.dhcp for all the possible values.
+	#
+	#  DHCP-Do-Not-Respond can be used to tell the server to not
+	#  respond.
+	#
+	#  In the event that DHCP-Message-Type is not set then the
+	#  server will fall back to determining the type of reply
+	#  based on the rcode of this section.
+
+	update reply {
+	       DHCP-Message-Type = DHCP-Offer
+	}
+
+	#  The contents here are invented.  Change them!
+	update reply {
+		DHCP-Domain-Name-Server = 127.0.0.1
+		DHCP-Domain-Name-Server = 127.0.0.2
+		DHCP-Subnet-Mask = 255.255.255.0
+		DHCP-Router-Address = 192.0.2.1
+		DHCP-IP-Address-Lease-Time = 86400
+		DHCP-DHCP-Server-Identifier = 192.0.2.1
+	}
+
+	#  Do a simple mapping of MAC to assigned IP.
+	#
+	#  See below for the definition of the "mac2ip"
+	#  module.
+	#
+	#mac2ip
+
+	#  If the MAC wasn't found in that list, do something else.
+	#  You could call a Perl, Python, or Java script here.
+
+	#if (notfound) {
+	# ...
+	#}
+
+	#  Or, allocate IPs from the DHCP pool in SQL. You may need to
+	#  set the pool name here if you haven't set it elsewhere.
+#	update control {
+#		Pool-Name := "local"
+#	}
+#	dhcp_sqlippool
+
+	#  If DHCP-Message-Type is not set, returning "ok" or
+	#  "updated" from this section will respond with a DHCP-Offer
+	#  message.
+	#
+	#  Other rcodes will tell the server to not return any response.
+	ok
+}
+
+dhcp DHCP-Request {
+
+	# Response packet type. See DHCP-Discover section above.
+	update reply {
+	       DHCP-Message-Type = DHCP-Ack
+	}
+
+	#  The contents here are invented.  Change them!
+	update reply {
+		DHCP-Domain-Name-Server = 127.0.0.1
+		DHCP-Domain-Name-Server = 127.0.0.2
+		DHCP-Subnet-Mask = 255.255.255.0
+		DHCP-Router-Address = 192.0.2.1
+		DHCP-IP-Address-Lease-Time = 86400
+		DHCP-DHCP-Server-Identifier = 192.0.2.1
+	}
+
+	#  Do a simple mapping of MAC to assigned IP.
+	#
+	#  See below for the definition of the "mac2ip"
+	#  module.
+	#
+	#mac2ip
+
+	#  If the MAC wasn't found in that list, do something else.
+	#  You could call a Perl, Python, or Java script here.
+
+	#if (notfound) {
+	# ...
+	#}
+
+	#  Or, allocate IPs from the DHCP pool in SQL. You may need to
+	#  set the pool name here if you haven't set it elsewhere.
+#	update control {
+#		Pool-Name := "local"
+#	}
+#	dhcp_sqlippool
+
+	#  If DHCP-Message-Type is not set, returning "ok" or
+	#  "updated" from this section will respond with a DHCP-Ack
+	#  packet.
+	#
+	#  "handled" will not return a packet, all other rcodes will
+	#  send back a DHCP-NAK.
+	ok
+}
+
+#
+#  Other DHCP packet types
+#
+#  There should be a separate section for each DHCP message type.
+#  By default this configuration will ignore them all. Any packet type
+#  not defined here will be responded to with a DHCP-NAK.
+
+dhcp DHCP-Decline {
+	update reply {
+	       DHCP-Message-Type = DHCP-Do-Not-Respond
+	}
+	reject
+}
+
+dhcp DHCP-Inform {
+	update reply {
+	       DHCP-Message-Type = DHCP-Do-Not-Respond
+	}
+	reject
+}
+
+dhcp DHCP-Release {
+	update reply {
+	       DHCP-Message-Type = DHCP-Do-Not-Respond
+	}
+	reject
+}
+
+
+}
+
+######################################################################
+#
+#  This next section is a sample configuration for the "passwd"
+#  module, that reads flat-text files.  It should go into
+#  radiusd.conf, in the "modules" section.
+#
+#  The file is in the format <mac>,<ip>
+#
+#	00:01:02:03:04:05,192.0.2.100
+#	01:01:02:03:04:05,192.0.2.101
+#	02:01:02:03:04:05,192.0.2.102
+#
+#  This lets you perform simple static IP assignment.
+#
+#  There is a preconfigured "mac2ip" module setup in
+#  mods-available/mac2ip. To use it do:
+#
+#    # cd raddb/
+#    # ln -s ../mods-available/mac2ip mods-enabled/mac2ip
+#    # mkdir mods-config/passwd
+#
+#  Then create the file mods-config/passwd/mac2ip with the above
+#  format.
+#
+######################################################################
+
+
+#  This is an example only - see mods-available/mac2ip instead; do
+#  not uncomment these lines here.
+#
+#passwd mac2ip {
+#	filename = ${confdir}/mac2ip
+#	format = "*DHCP-Client-Hardware-Address:=DHCP-Your-IP-Address"
+#	delimiter = ","
+#}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/dhcp.relay b/src/test/setup/radius-config/freeradius/sites-available/dhcp.relay
new file mode 100644
index 0000000..737cc5d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/dhcp.relay
@@ -0,0 +1,44 @@
+# -*- text -*-
+######################################################################
+#
+#	This is a virtual server that handles DHCP relaying
+#
+#	Only one server can listen on a socket, so you cannot
+#	do DHCP relaying && run a DHCP server at the same time.
+#
+######################################################################
+
+server dhcp.eth1 {
+	listen {
+		ipaddr = *
+		port = 67
+		type = dhcp
+		interface = eth1
+	}
+
+	#  Packets received on the socket will be processed through one
+	#  of the following sections, named after the DHCP packet type.
+	#  See dictionary.dhcp for the packet types.
+	dhcp DHCP-Discover {
+		update config {
+			# IP Address of the DHCP server
+			DHCP-Relay-To-IP-Address := 192.0.2.2
+		}
+		update request {
+			# IP Address of the DHCP relay (ourselves)
+			DHCP-Gateway-IP-Address := 192.0.2.1
+		}
+		ok
+	}
+
+	dhcp DHCP-Request {
+		update config {
+			# IP Address of the DHCP server
+			DHCP-Relay-To-IP-Address := 192.0.2.2
+		}
+		update request {
+			# IP Address of the DHCP relay (ourselves)
+			DHCP-Gateway-IP-Address := 192.0.2.1
+		}
+		ok
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/dynamic-clients b/src/test/setup/radius-config/freeradius/sites-available/dynamic-clients
new file mode 100644
index 0000000..8f5edde
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/dynamic-clients
@@ -0,0 +1,224 @@
+# -*- text -*-
+######################################################################
+#
+#	Sample configuration file for dynamically updating the list
+#	of RADIUS clients at run time.
+#
+#	Everything is keyed off of a client "network".  (e.g. 192.0.2/24)
+#	This configuration lets the server know that clients within
+#	that network are defined dynamically.
+#
+#	When the server receives a packet from an unknown IP address
+#	within that network, it tries to find a dynamic definition
+#	for that client.  If the definition is found, the IP address
+#	(and other configuration) is added to the server's internal
+#	cache of "known clients", with a configurable lifetime.
+#
+#	Further packets from that IP address result in the client
+#	definition being found in the cache.  Once the lifetime is
+#	reached, the client definition is deleted, and any new requests
+#	from that client are looked up as above.
+#
+#	If the dynamic definition is not found, then the request is
+#	treated as if it came from an unknown client.  i.e. It is
+#	silently discarded.
+#
+#	As part of protection from Denial of Service (DoS) attacks,
+#	the server will add only one new client per second.  This CANNOT
+#	be changed, and is NOT configurable.
+#
+#	$Id: cdfa6175a9617bcd081b0b69f2c9340c3adaa56e $
+#
+######################################################################
+
+#
+#  Define a network where clients may be dynamically defined.
+client dynamic {
+	ipaddr = 192.0.2.0
+
+	#
+	#  You MUST specify a netmask!
+	#  IPv4 /32 or IPv6 /128 are NOT allowed!
+	netmask = 24
+
+	#
+	#  Any other configuration normally found in a "client"
+	#  entry can be used here.
+
+	#
+	#  A shared secret does NOT have to be defined.  It can
+	#  be left out.
+
+	#
+	#  Define the virtual server used to discover dynamic clients.
+	dynamic_clients = dynamic_clients
+
+	#
+	#  The directory where client definitions are stored.  This
+	#  needs to be used ONLY if the client definitions are stored
+	#  in flat-text files.  Each file in that directory should be
+	#  ONE and only one client definition.  The name of the file
+	#  should be the IP address of the client.
+	#
+	#  If you are storing clients in SQL, this entry should not
+	#  be used.
+#	directory = ${confdir}/dynamic-clients/
+
+	#
+	#  Define the lifetime (in seconds) for dynamic clients.
+	#  They will be cached for this lifetime, and deleted afterwards.
+	#
+	#  If the lifetime is "0", then the dynamic client is never
+	#  deleted.  The only way to delete the client is to re-start
+	#  the server.
+	lifetime = 3600
+}
+
+#
+#  This is the virtual server referenced above by "dynamic_clients".
+server dynamic_clients {
+
+	#
+	#  The only contents of the virtual server is the "authorize" section.
+	authorize {
+
+		#
+		#  Put any modules you want here.  SQL, LDAP, "exec",
+		#  Perl, etc.  The only requirement is that the
+		#  attributes MUST go into the control item list.
+		#
+		#  The request that is processed through this section
+		#  is EMPTY.  There are NO attributes.  The request is fake,
+		#  and is NOT the packet that triggered the lookup of
+		#  the dynamic client.
+		#
+		#  The ONLY piece of useful information is either
+		#
+		#	Packet-Src-IP-Address (IPv4 clients)
+		#	Packet-Src-IPv6-Address (IPv6 clients)
+		#
+		#  The attributes used to define a dynamic client mirror
+		#  the configuration items in the "client" structure.
+		#
+
+		#
+		#  Example 1: Hard-code a client IP.  This example is
+		#             useless, but it documents the attributes
+		#             you need.
+		#
+		update control {
+
+			#
+			#  Echo the IP address of the client.
+			FreeRADIUS-Client-IP-Address = "%{Packet-Src-IP-Address}"
+
+			# require_message_authenticator
+			FreeRADIUS-Client-Require-MA = no
+
+			# secret
+			FreeRADIUS-Client-Secret = "testing123"
+
+			# shortname
+			FreeRADIUS-Client-Shortname = "%{Packet-Src-IP-Address}"
+
+			# nas_type
+			FreeRADIUS-Client-NAS-Type = "other"
+
+			# virtual_server
+			#
+			#  This can ONLY be used if the network client
+			#  definition (e.g. "client dynamic" above) has
+			#  NO virtual_server defined.
+			#
+			#  If the network client definition does have a
+			#  virtual_server defined, then that is used,
+			#  and there is no need to define this attribute.
+			#
+			FreeRADIUS-Client-Virtual-Server = "something"
+
+		}
+
+		#
+		#  Example 2: Read the clients from "clients" files
+		#             in a directory.
+		#
+
+		#             This requires you to uncomment the
+		#             "directory" configuration in the
+		#             "client dynamic" configuration above,
+		#	      and then put one file per IP address in
+		#             that directory.
+		#
+		dynamic_clients
+
+		#
+		#  Example 3: Look the clients up in SQL.
+		#
+		#  This requires the SQL module to be configured, of course.
+		if ("%{sql: SELECT nasname FROM nas WHERE nasname = '%{Packet-Src-IP-Address}'}") {
+			update control {
+				#
+				#  Echo the IP.
+				FreeRADIUS-Client-IP-Address = "%{Packet-Src-IP-Address}"
+
+				#
+				#  Do multiple SELECT statements to grab
+				#  the various definitions.
+				FreeRADIUS-Client-Shortname = "%{sql: SELECT shortname FROM nas WHERE nasname = '%{Packet-Src-IP-Address}'}"
+
+				FreeRADIUS-Client-Secret = "%{sql: SELECT secret FROM nas WHERE nasname = '%{Packet-Src-IP-Address}'}"
+
+				FreeRADIUS-Client-NAS-Type = "%{sql: SELECT type FROM nas WHERE nasname = '%{Packet-Src-IP-Address}'}"
+
+				FreeRADIUS-Client-Virtual-Server = "%{sql: SELECT server FROM nas WHERE nasname = '%{Packet-Src-IP-Address}'}"
+			}
+
+		}
+
+		# Do an LDAP lookup in the Elements OU and check whether
+		# the Packet-Src-IP-Address object has an "ou"
+		# attribute; if it does, continue.  Change "ACME.COM" to
+		# the real OU of your organization.
+		#
+		# Assuming the following schema:
+		#
+		# OU=Elements,OU=Radius,DC=ACME,DC=COM
+		#
+		# Elements will hold a record of every NAS in your
+		# network.  Create Group objects based on the IP
+		# address of the NAS and set the "Location" or "l"
+		# attribute to the NAS Huntgroup the NAS belongs to,
+		# so that they can be centrally managed in LDAP.
+		#
+		# e.g.  CN=10.1.2.3,OU=Elements,OU=Radius,DC=ACME,DC=COM
+		#
+		# With a "l" value of "CiscoRTR" for a Cisco Router
+		# that has a NAS-IP-Address or Source-IP-Address of
+		# 10.1.2.3.
+		#
+		# And with an "ou" value of the shared secret password
+		# for the NAS element, i.e. "password".
+		if ("%{ldap:ldap:///OU=Elements,OU=Radius,DC=ACME,DC=COM?ou?sub?cn=%{Packet-Src-IP-Address}}") {
+			update control {
+			       FreeRADIUS-Client-IP-Address = "%{Packet-Src-IP-Address}"
+
+				# Set the Client-Shortname to be the Location
+				# "l" just like in the Huntgroups, but this
+				# time to the shortname.
+
+				FreeRADIUS-Client-Shortname = "%{ldap:ldap:///OU=Elements,OU=Radius,DC=ACME,DC=COM?l?sub?cn=%{Packet-Src-IP-Address}}"
+
+				# Lookup and set the Shared Secret based on
+				# the "ou" attribute.
+				FreeRADIUS-Client-Secret = "%{ldap:ldap:///OU=Elements,OU=Radius,DC=ACME,DC=COM?ou?sub?cn=%{Packet-Src-IP-Address}}"
+			}
+		}
+
+		#
+		#  Tell the caller that the client was defined properly.
+		#
+		#  If the authorize section does NOT return "ok", then
+		#  the new client is ignored.
+		ok
+	}
+}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/example b/src/test/setup/radius-config/freeradius/sites-available/example
new file mode 100644
index 0000000..05522ea
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/example
@@ -0,0 +1,122 @@
+######################################################################
+#
+#	An example virtual server configuration.
+#
+#	$Id: e58e24319d6320a0a0d56fd649d937bf95156739 $
+#
+######################################################################
+
+
+#
+#	This client will be available to any "listen" section that
+#	are defined outside of a virtual server section.  However,
+#	when the server receives a packet from this client, the
+#	request will be processed through the "example" virtual
+#	server, as the "client" section contains a configuration item
+#	to that effect.
+#
+#	Note that this client will be able to send requests to any
+#	port defined in a global "listen" section.  It will NOT,
+#	however, be able to send requests to a port defined in a
+#	"listen" section that is contained in a "server" section.
+#
+#	With careful matching of configurations, you should be able
+#	to:
+#
+#	- Define one authentication port, but process each client
+#	  through a separate virtual server.
+#
+#	- define multiple authentication ports, each with a private
+#	  list of clients.
+#
+#	- define multiple authentication ports, each of which may
+#	  have the same client listed, but with different shared
+#	  secrets
+#
+#	FYI: We use an address in the 192.0.2.* space for this example,
+#	as RFC 3330 says that that /24 range is used for documentation
+#	and examples, and should not appear on the net.  You shouldn't
+#	use it for anything, either.
+#
+client 192.0.2.10 {
+	shortname	= example-client
+	secret		= testing123
+	virtual_server  = example
+}
+
+######################################################################
+#
+#	An example virtual server.  It starts off with "server name {"
+#	The "name" is used to reference this server from a "listen"
+#	or "client" section.
+#
+######################################################################
+server example {
+	#
+	#	Listen on 192.0.2.1:1812 for Access-Requests
+	#
+	#	When the server receives a packet, it is processed
+	#	through the "authorize", etc. sections listed here,
+	#	NOT the global ones the "default" site.
+	#
+	listen {
+		ipaddr = 192.0.2.1
+		port = 1812
+		type = auth
+	}
+
+	#
+	#	This client is listed within the "server" section,
+	#	and is therefore known ONLY to the socket defined
+	#	in the "listen" section above.  If the client IP
+	#	sends a request to a different socket, the server
+	#	will treat it as an unknown client, and will not
+	#	respond.
+	#
+	#	In contrast, the client listed at the top of this file
+	#	is outside of any "server" section, and is therefore
+	#	global in scope.  It can send packets to any port
+	#	defined in a global "listen" section.  It CANNOT send
+	#	packets to the listen section defined above, though.
+	#
+	#	Note that you don't have to have a "virtual_server = example"
+	#	line here, as the client is encapsulated within
+	#	the "server" section.
+	#
+	client 192.0.2.9 {
+		shortname	= example-client
+		secret		= testing123
+	}
+
+	authorize {
+		#
+		#  Some example policies.  See "man unlang" for more.
+		#
+		if ("%{User-Name}" == "bob") {
+			update control {
+				Cleartext-Password := "bob"
+			}
+		}
+
+		#
+		#  And then reject the user.  The next line requires
+		#  that the "always reject {}" section is defined in
+		#  the "modules" section of radiusd.conf.
+		#
+		reject
+	}
+
+	authenticate {
+
+	}
+
+	post-auth {
+
+		Post-Auth-Type Reject {
+			update reply {
+				Reply-Message = "This is only an example."
+			}
+		}
+	}
+
+}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/inner-tunnel b/src/test/setup/radius-config/freeradius/sites-available/inner-tunnel
new file mode 100644
index 0000000..dc7b7de
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/inner-tunnel
@@ -0,0 +1,408 @@
+# -*- text -*-
+######################################################################
+#
+#	This is a virtual server that handles *only* inner tunnel
+#	requests for EAP-TTLS and PEAP types.
+#
+#	$Id: 11b6c12d845a1e8287888b3f0a0748d810b2c184 $
+#
+######################################################################
+
+server inner-tunnel {
+
+#
+#  This next section is here to allow testing of the "inner-tunnel"
+#  authentication methods, independently from the "default" server.
+#  It is listening on "localhost", so that it can only be used from
+#  the same machine.
+#
+#	$ radtest USER PASSWORD 127.0.0.1:18120 0 testing123
+#
+#  If it works, you have configured the inner tunnel correctly.  To check
+#  if PEAP will work, use:
+#
+#	$ radtest -t mschap USER PASSWORD 127.0.0.1:18120 0 testing123
+#
+#  If that works, PEAP should work.  If that command doesn't work, then
+#
+#	FIX THE INNER TUNNEL CONFIGURATION SO THAT IT WORKS.
+#
+#  Do NOT do any PEAP tests.  It won't help.  Instead, concentrate
+#  on fixing the inner tunnel configuration.  DO NOTHING ELSE.
+#
+listen {
+       ipaddr = 127.0.0.1
+       port = 18120
+       type = auth
+}
+
+
+#  Authorization. First preprocess (hints and huntgroups files),
+#  then realms, and finally look in the "users" file.
+#
+#  The order of the realm modules will determine the order that
+#  we try to find a matching realm.
+#
+#  Make *sure* that 'preprocess' comes before any realm if you
+#  need to setup hints for the remote radius server
+authorize {
+	#
+	#  The chap module will set 'Auth-Type := CHAP' if we are
+	#  handling a CHAP request and Auth-Type has not already been set
+	chap
+
+	#
+	#  If the users are logging in with an MS-CHAP-Challenge
+	#  attribute for authentication, the mschap module will find
+	#  the MS-CHAP-Challenge attribute, and add 'Auth-Type := MS-CHAP'
+	#  to the request, which will cause the server to then use
+	#  the mschap module for authentication.
+	mschap
+
+	#
+	#  Pull crypt'd passwords from /etc/passwd or /etc/shadow,
+	#  using the system API's to get the password.  If you want
+	#  to read /etc/passwd or /etc/shadow directly, see the
+	#  passwd module, above.
+	#
+#	unix
+
+	#
+	#  Look for IPASS style 'realm/', and if not found, look for
+	#  '@realm', and decide whether or not to proxy, based on
+	#  that.
+#	IPASS
+
+	#
+	#  If you are using multiple kinds of realms, you probably
+	#  want to set "ignore_null = yes" for all of them.
+	#  Otherwise, when the first style of realm doesn't match,
+	#  the other styles won't be checked.
+	#
+	#  Note that proxying the inner tunnel authentication means
+	#  that the user MAY use one identity in the outer session
+	#  (e.g. "anonymous"), and a different one here
+	#  (e.g. "user@example.com").  The inner session will then be
+	#  proxied elsewhere for authentication.  If you are not
+	#  careful, this means that the user can cause you to forward
+	#  the authentication to another RADIUS server, and have the
+	#  accounting logs *not* sent to the other server.  This makes
+	#  it difficult to bill people for their network activity.
+	#
+	suffix
+#	ntdomain
+
+	#
+	#  The "suffix" module takes care of stripping the domain
+	#  (e.g. "@example.com") from the User-Name attribute, and the
+	#  next few lines ensure that the request is not proxied.
+	#
+	#  If you want the inner tunnel request to be proxied, delete
+	#  the next few lines.
+	#
+	update control {
+	       Proxy-To-Realm := LOCAL
+	}
+
+	#
+	#  This module takes care of EAP-MSCHAPv2 authentication.
+	#
+	#  It also sets the EAP-Type attribute in the request
+	#  attribute list to the EAP type from the packet.
+	#
+	#  The example below uses module failover to avoid querying all
+	#  of the following modules if the EAP module returns "ok".
+	#  Therefore, your LDAP and/or SQL servers will not be queried
+	#  for the many packets that go back and forth to set up TTLS
+	#  or PEAP.  The load on those servers will therefore be reduced.
+	#
+	eap {
+		ok = return
+	}
+
+	#
+	#  Read the 'users' file
+	files
+
+	#
+	#  Look in an SQL database.  The schema of the database
+	#  is meant to mirror the "users" file.
+	#
+	#  See "Authorization Queries" in sql.conf
+	-sql
+
+	#
+	#  If you are using /etc/smbpasswd, and are also doing
+	#  mschap authentication, then un-comment this line, and
+	#  configure the 'etc_smbpasswd' module, above.
+#	etc_smbpasswd
+
+	#
+	#  The ldap module reads passwords from the LDAP database.
+	-ldap
+
+	#
+	#  Enforce daily limits on time spent logged in.
+#	daily
+
+	expiration
+	logintime
+
+	#
+	#  If no other module has claimed responsibility for
+	#  authentication, then try to use PAP.  This allows the
+	#  other modules listed above to add a "known good" password
+	#  to the request, and to do nothing else.  The PAP module
+	#  will then see that password, and use it to do PAP
+	#  authentication.
+	#
+	#  This module should be listed last, so that the other modules
+	#  get a chance to set Auth-Type for themselves.
+	#
+	pap
+}
+
+
+#  Authentication.
+#
+#
+#  This section lists which modules are available for authentication.
+#  Note that it does NOT mean 'try each module in order'.  It means
+#  that a module from the 'authorize' section adds a configuration
+#  attribute 'Auth-Type := FOO'.  That authentication type is then
+#  used to pick the appropriate module from the list below.
+#
+
+#  In general, you SHOULD NOT set the Auth-Type attribute.  The server
+#  will figure it out on its own, and will do the right thing.  The
+#  most common side effect of erroneously setting the Auth-Type
+#  attribute is that one authentication method will work, but the
+#  others will not.
+#
+#  The most common reason to set the Auth-Type attribute by hand
+#  is to either forcibly reject the user, or forcibly accept them.
+#
+authenticate {
+	#
+	#  PAP authentication, when a back-end database listed
+	#  in the 'authorize' section supplies a password.  The
+	#  password can be clear-text, or encrypted.
+	Auth-Type PAP {
+		pap
+	}
+
+	#
+	#  Most people want CHAP authentication
+	#  A back-end database listed in the 'authorize' section
+	#  MUST supply a CLEAR TEXT password.  Encrypted passwords
+	#  won't work.
+	Auth-Type CHAP {
+		chap
+	}
+
+	#
+	#  MSCHAP authentication.
+	Auth-Type MS-CHAP {
+		mschap
+	}
+
+	#
+	#  Pluggable Authentication Modules.
+#	pam
+
+	# Uncomment it if you want to use ldap for authentication
+	#
+	# Note that this means "check plain-text password against
+	# the ldap database", which means that EAP won't work,
+	# as it does not supply a plain-text password.
+	#
+	#  We do NOT recommend using this.  LDAP servers are databases.
+	#  They are NOT authentication servers.  FreeRADIUS is an
+	#  authentication server, and knows what to do with authentication.
+	#  LDAP servers do not.
+	#
+#	Auth-Type LDAP {
+#		ldap
+#	}
+
+	#
+	#  Allow EAP authentication.
+	eap
+}
+
+######################################################################
+#
+#	There are no accounting requests inside of EAP-TTLS or PEAP
+#	tunnels.
+#
+######################################################################
+
+
+#  Session database, used for checking Simultaneous-Use. Either the radutmp
+#  or rlm_sql module can handle this.
+#  The rlm_sql module is *much* faster
+session {
+	radutmp
+
+	#
+	#  See "Simultaneous Use Checking Queries" in sql.conf
+#	sql
+}
+
+
+#  Post-Authentication
+#  Once we KNOW that the user has been authenticated, there are
+#  additional steps we can take.
+post-auth {
+	#  If you want privacy to remain, see the
+	#  Chargeable-User-Identity attribute from RFC 4372.
+	#  If you want to use it just uncomment the line below.
+#       cui-inner
+
+	#
+	#  If you want to have a log of authentication replies,
+	#  un-comment the following line, and enable the
+	#  'detail reply_log' module.
+#	reply_log
+
+	#
+	#  After authenticating the user, do another SQL query.
+	#
+	#  See "Authentication Logging Queries" in sql.conf
+	-sql
+
+	#
+	#  Instead of sending the query to the SQL server,
+	#  write it into a log file.
+	#
+#	sql_log
+
+	#
+	#  Un-comment the following if you have set
+	#  'edir_account_policy_check = yes' in the ldap module sub-section of
+	#  the 'modules' section.
+	#
+#	ldap
+
+	#
+	#  Access-Reject packets are sent through the REJECT sub-section of the
+	#  post-auth section.
+	#
+	#  Add the ldap module name (or instance) if you have set
+	#  'edir_account_policy_check = yes' in the ldap module configuration
+	#
+	Post-Auth-Type REJECT {
+		# log failed authentications in SQL, too.
+		-sql
+		attr_filter.access_reject
+	}
+
+	#
+	#  The example policy below updates the outer tunnel reply
+	#  (usually Access-Accept) with the User-Name from the inner
+	#  tunnel User-Name.  Since this section is processed in the
+	#  context of the inner tunnel, "request" here means "inner
+	#  tunnel request", and "outer.reply" means "outer tunnel
+	#  reply attributes".
+	#
+	#  This example is most useful when the outer session contains
+	#  a User-Name of "anonymous@....", or a MAC address.  If it
+	#  is enabled, the NAS SHOULD use the inner tunnel User-Name
+	#  in subsequent accounting packets.  This makes it easier to
+	#  track user sessions, as they will all be based on the real
+	#  name, and not on "anonymous".
+	#
+	#  The problem with doing this is that it ALSO exposes the
+	#  real user name to any intermediate proxies.  People use
+	#  "anonymous" identifiers outside of the tunnel for a very
+	#  good reason: it gives them more privacy.  Setting the reply
+	#  to contain the real user name removes ALL privacy from
+	#  their session.
+	#
+	#  If you still want to use the inner tunnel User-Name then
+	#  uncomment the section below, otherwise you may want
+	#  to use  Chargeable-User-Identity attribute from RFC 4372.
+	#  See further on.
+	#update outer.reply {
+	#  User-Name = "%{request:User-Name}"
+	#}
+	#
+}
+
+#
+#  When the server decides to proxy a request to a home server,
+#  the proxied request is first passed through the pre-proxy
+#  stage.  This stage can re-write the request, or decide to
+#  cancel the proxy.
+#
+#  Only a few modules currently have this method.
+#
+pre-proxy {
+	#  Uncomment the following line if you want to change attributes
+	#  as defined in the preproxy_users file.
+#	files
+
+	#  Uncomment the following line if you want to filter requests
+	#  sent to remote servers based on the rules defined in the
+	#  'attrs.pre-proxy' file.
+#	attr_filter.pre-proxy
+
+	#  If you want to have a log of packets proxied to a home
+	#  server, un-comment the following line, and the
+	#  'detail pre_proxy_log' section, above.
+#	pre_proxy_log
+}
+
+#
+#  When the server receives a reply to a request it proxied
+#  to a home server, the request may be massaged here, in the
+#  post-proxy stage.
+#
+post-proxy {
+
+	#  If you want to have a log of replies from a home server,
+	#  un-comment the following line, and the 'detail post_proxy_log'
+	#  section, above.
+#	post_proxy_log
+
+	#  Uncomment the following line if you want to filter replies from
+	#  remote proxies based on the rules defined in the 'attrs' file.
+#	attr_filter.post-proxy
+
+	#
+	#  If you are proxying LEAP, you MUST configure the EAP
+	#  module, and you MUST list it here, in the post-proxy
+	#  stage.
+	#
+	#  You MUST also use the 'nostrip' option in the 'realm'
+	#  configuration.  Otherwise, the User-Name attribute
+	#  in the proxied request will not match the user name
+	#  hidden inside of the EAP packet, and the end server will
+	#  reject the EAP request.
+	#
+	eap
+
+	#
+	#  If the server tries to proxy a request and fails, then the
+	#  request is processed through the modules in this section.
+	#
+	#  The main use of this section is to permit robust proxying
+	#  of accounting packets.  The server can be configured to
+	#  proxy accounting packets as part of normal processing.
+	#  Then, if the home server goes down, accounting packets can
+	#  be logged to a local "detail" file, for processing with
+	#  radrelay.  When the home server comes back up, radrelay
+	#  will read the detail file, and send the packets to the
+	#  home server.
+	#
+	#  With this configuration, the server always responds to
+	#  Accounting-Requests from the NAS, but only writes
+	#  accounting packets to disk if the home server is down.
+	#
+#	Post-Proxy-Type Fail {
+#			detail
+#	}
+
+}
+
+} # inner-tunnel server block
diff --git a/src/test/setup/radius-config/freeradius/sites-available/originate-coa b/src/test/setup/radius-config/freeradius/sites-available/originate-coa
new file mode 100644
index 0000000..79e2f1d
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/originate-coa
@@ -0,0 +1,190 @@
+# -*- text -*-
+######################################################################
+#
+#  The server can originate Change of Authorization (CoA) or
+#  Disconnect request packets.  These packets are used to dynamically
+#  change the parameters of a users session (bandwidth, etc.), or
+#  to forcibly disconnect the user.
+#
+#  There are some caveats.  Not all NAS vendors support this
+#  functionality.  Even for the ones that do, it may be difficult to
+#  find out what needs to go into a CoA-Request or Disconnect-Request
+#  packet.  All we can suggest is to read the NAS documentation
+#  available from the vendor.  That documentation SHOULD describe
+#  what information their equipment needs to see in a CoA packet.
+#
+#  This information is usually a list of attributes such as:
+#
+#	NAS-IP-Address (or NAS-IPv6-Address)
+#	NAS-Identifier
+#	User-Name
+#	Acct-Session-Id
+#
+#  CoA packets can be originated when a normal Access-Request or
+#  Accounting-Request packet is received.  Simply update the
+#  "coa" list:
+#
+#	update coa {
+#	       User-Name = "%{User-Name}"
+#	       Acct-Session-Id = "%{Acct-Session-Id}"
+#	       NAS-IP-Address = "%{NAS-IP-Address}"
+#	}
+#
+#  And the CoA packet will be sent.  You can also send Disconnect
+#  packets by using "update disconnect { ...".
+#
+#  This "update coa" entry can be placed in any section (authorize,
+#  preacct, etc.), EXCEPT for pre-proxy and post-proxy.  The CoA
+#  packets CANNOT be sent if the original request has been proxied.
+#
+#  The CoA functionality works best when the RADIUS server and
+#  the NAS receiving CoA packets are on the same network.
+#
+#  If "update coa { ... " is used, and then later it becomes necessary
+#  to not send a CoA request, the following example can suppress the
+#  CoA packet:
+#
+#	update control {
+#		Send-CoA-Request = No
+#	}
+#
+#  The default destination of a CoA packet is the NAS (or client)
+#  that sent the original Access-Request or Accounting-Request.  See
+#  raddb/clients.conf for a "coa_server" configuration that ties
+#  a client to a specific home server, or to a home server pool.
+#
+#  If you need to send the packet to a different destination, update
+#  the "coa" list with one of:
+#
+#	Packet-Dst-IP-Address = ...
+#	Packet-Dst-IPv6-Address = ...
+#	Home-Server-Pool = ...
+#
+#  That specifies an IPv4 or IPv6 address, or a home server pool
+#  (such as the "coa" pool example below).  This use is not
+#  recommended, however.  It is much better to point the client
+#  configuration directly at the CoA server/pool, as outlined
+#  earlier.
+#
+#  If the CoA port is non-standard, you can also set:
+#
+#	Packet-Dst-Port
+#
+#  to have the value of the port.
+#
+######################################################################
+
+#
+#  When CoA packets are sent to a NAS, the NAS is acting as a
+#  server (see RFC 5176).  i.e. it has a type (accepts CoA and/or
+#  Disconnect packets), an IP address (or IPv6 address), a
+#  destination port, and a shared secret.
+#
+#  This information *cannot* go into a "client" section.  In the future,
+#  FreeRADIUS will be able to receive, and to proxy CoA packets.
+#  Having the CoA configuration as below means that we can later do
+#  load-balancing, fail-over, etc. of CoA servers.  If the CoA
+#  configuration went into a "client" section, it would be impossible
+#  to do proper proxying of CoA requests.
+#
+home_server localhost-coa {
+	type = coa
+
+	#
+	#  Note that a home server of type "coa" MUST be a real NAS,
+	#  with an ipaddr or ipv6addr.  It CANNOT point to a virtual
+	#  server.
+	#
+	ipaddr = 127.0.0.1
+	port = 3799
+
+	#  This secret SHOULD NOT be the same as the shared
+	#  secret in a "client" section.
+	secret = testing1234
+
+	#  CoA specific parameters.  See raddb/proxy.conf for details.
+	coa {
+		irt = 2
+		mrt = 16
+		mrc = 5
+		mrd = 30
+	}
+}
+
+#
+#  CoA servers can be put into pools, just like normal servers.
+#
+home_server_pool coa {
+	type = fail-over
+
+	# Point to the CoA server above.
+	home_server = localhost-coa
+
+	#  CoA requests are run through the pre-proxy section.
+	#  CoA responses are run through the post-proxy section.
+	virtual_server = originate-coa.example.com
+
+	#
+	#  Home server pools of type "coa" cannot (currently) have
+	#  a "fallback" configuration.
+	#
+}
+
+#
+#  When this virtual server is run, the original request has FINISHED
+#  processing.  i.e. the reply has already been sent to the NAS.
+#  You can access the attributes in the original packet, reply, and
+#  control items, but changing them will have NO EFFECT.
+#
+#  The CoA packet is in the "proxy-request" attribute list.
+#  The CoA reply (if any) is in the "proxy-reply" attribute list.
+#
+server originate-coa.example.com {
+  pre-proxy {
+	update proxy-request {
+		NAS-IP-Address = 127.0.0.1
+	}
+  }
+
+  #
+  # Handle the responses here.
+  #
+  post-proxy {
+	switch "%{proxy-reply:Packet-Type}" {
+		case CoA-ACK {
+			ok
+		}
+
+		case CoA-NAK {
+			# the NAS didn't like the CoA request
+			ok
+		}
+
+		case Disconnect-ACK {
+			ok
+		}
+
+		case Disconnect-NAK {
+			# the NAS didn't like the Disconnect request
+			ok
+		}
+
+		# Invalid packet type.  This shouldn't happen.
+		case {
+		     fail
+		}
+	}
+
+	#
+	#  These methods are run when there is NO response
+	#  to the request.
+	#
+	Post-Proxy-Type Fail-CoA {
+		ok
+	}
+
+	Post-Proxy-Type Fail-Disconnect {
+		ok
+	}
+  }
+}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/proxy-inner-tunnel b/src/test/setup/radius-config/freeradius/sites-available/proxy-inner-tunnel
new file mode 100644
index 0000000..1ce4137
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/proxy-inner-tunnel
@@ -0,0 +1,47 @@
+# -*- text -*-
+######################################################################
+#
+#	This is a virtual server that handles *only* inner tunnel
+#	requests for EAP-TTLS and PEAP types.
+#
+#	$Id$
+#
+######################################################################
+
+server proxy-inner-tunnel {
+
+#
+#  This example is very simple.  All inner tunnel requests get
+#  proxied to another RADIUS server.
+#
+authorize {
+	#
+	#  Do other things here, as necessary.
+	#
+	#  e.g. run the "realms" module, to decide how to proxy
+	#  the inner tunnel request.
+	#
+
+	update control {
+		#  You should update this to be one of your realms.
+		Proxy-To-Realm := "example.com"
+	}
+}
+
+authenticate {
+	#
+	#  This is necessary so that the inner tunnel EAP-MSCHAPv2
+	#  method can be called.  That method takes care of turning
+	#  EAP-MSCHAPv2 into plain MS-CHAPv2, if necessary.
+	eap
+}
+
+post-proxy {
+	#
+	#  This is necessary for LEAP, or if you set:
+	#
+	#  proxy_tunneled_request_as_eap = no
+	#
+	eap
+}
+}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/robust-proxy-accounting b/src/test/setup/radius-config/freeradius/sites-available/robust-proxy-accounting
new file mode 100644
index 0000000..9bf8697
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/robust-proxy-accounting
@@ -0,0 +1,167 @@
+# -*- text -*-
+######################################################################
+#
+#	This is a sample configuration for robust proxy accounting.
+#	Accounting packets are proxied, OR logged locally if all
+#	home servers are down.  When the home servers come back up,
+#	the accounting packets are forwarded.
+#
+#	This method enables the server to proxy all packets to the
+#	home servers when they're up, AND to avoid writing to the
+#	detail file in most situations.
+#
+#	In most situations, proxying of accounting messages is done
+#	in a "pass-through" fashion.  If the home server does not
+#	respond, then the proxy server does not respond to the NAS.
+#	That means that the NAS must retransmit packets, sometimes
+#	forever.  This example shows how the proxy server can still
+#	respond to the NAS, even if all home servers are down.
+#
+#	This configuration could be done MUCH more simply if ALL
+#	packets were written to the detail file.  But that would
+#	involve a lot more disk writes, which may not be a good idea.
+#
+#	This file is NOT meant to be used as-is.  It needs to be
+#	edited to match your local configuration.
+#
+#	$Id$
+#
+######################################################################
+
+#  (1) Define two home servers.
+home_server home1.example.com {
+	type = acct
+	ipaddr = 192.0.2.10
+	port = 1813
+	secret = testing123
+
+	#  Mark this home server alive ONLY when it starts being responsive
+	status_check = request
+	username = "test_user_status_check"
+
+	#  Set the response timeout aggressively low.
+	#  You MAY have to increase this, depending on tests with
+	#  your local installation.
+	response_window = 6
+}
+
+home_server home2.example.com {
+	type = acct
+	ipaddr = 192.0.2.20
+	port = 1813
+	secret = testing123
+
+	#  Mark this home server alive ONLY when it starts being responsive
+	status_check = request
+	username = "test_user_status_check"
+
+	#  Set the response timeout aggressively low.
+	#  You MAY have to increase this, depending on tests with
+	#  your local installation.
+	response_window = 6
+}
+
+#  (2) Define a virtual server to be used when both of the
+#  home servers are down.
+home_server acct_detail.example.com {
+	virtual_server = acct_detail.example.com
+}
+
+#  Put all of the servers into a pool.
+home_server_pool acct_pool.example.com {
+	type = load-balance	# other types are OK, too.
+
+	home_server = home1.example.com
+	home_server = home2.example.com
+	# add more home_server's here.
+
+	# If all home servers are down, try a home server that
+	# is a local virtual server.
+	fallback = acct_detail.example.com
+
+	# for pre/post-proxy policies
+	virtual_server = home.example.com
+}
+
+#  (3) Define a realm for these home servers.
+#  It should NOT be used as part of normal proxying decisions!
+realm acct_realm.example.com {
+	acct_pool = acct_pool.example.com
+}
+
+#  (4) Define a detail file writer.
+#   See raddb/modules/detail.example.com
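+#
+#   A minimal sketch of such an instance (not part of the original
+#   file; the filename format is illustrative and should be taken from
+#   the detail.example.com example shipped with the server):
+#
+#	detail detail.example.com {
+#		filename = "${radacctdir}/detail.example.com/detail-%Y%m%d:%H:%G"
+#	}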
+
+#  (5) Define the virtual server to write the packets to the detail file
+#  This will be called when ALL home servers are down, because of the
+#  "fallback" configuration in the home server pool.
+server acct_detail.example.com {
+	accounting {
+		detail.example.com
+	}
+}
+
+#  (6) Define a virtual server to handle pre/post-proxy re-writing
+server home.example.com {
+	pre-proxy {
+		#  Insert pre-proxy rules here
+	}
+
+	post-proxy {
+		#  Insert post-proxy rules here
+
+		#  This will be called when the CURRENT packet failed
+		#  to be proxied.  This may happen when one home server
+		#  suddenly goes down, even though another home server
+		#  may be alive.
+		#
+		#  i.e. the current request has run out of time, so it
+		#  cannot fail over to another (possibly) alive server.
+		#
+		#  We want to respond to the NAS, so that it can stop
+		#  re-sending the packet.  We write the packet to the
+		#  "detail" file, where it will be read, and sent to
+		#  another home server.
+		#
+		Post-Proxy-Type Fail {
+			detail.example.com
+		}
+	}
+
+
+	#  Read accounting packets from the detail file(s) for
+	#  the home server.
+	#
+	#  Note that you can have only ONE "listen" section reading
+	#  detail files from a particular directory.  That is why the
+	#  destination host name is used as part of the directory name
+	#  below.  Having two "listen" sections reading detail files
+	#  from the same directory WILL cause problems.  The packets
+	#  may be read by one, the other, or both "listen" sections.
+	listen {
+		type = detail
+		filename = "${radacctdir}/detail.example.com/detail-*:*"
+		load_factor = 10
+	}
+
+	#  All packets read from the detail file are proxied back to
+	#  the home servers.
+	#
+	#  The normal pre/post-proxy rules are applied to them, too.
+	#
+	#  If the home servers are STILL down, then the server stops
+	#  reading the detail file, and queues the packets for a later
+	#  retransmission.  The Post-Proxy-Type "Fail" handler is NOT
+	#  called.
+	#
+	#  When the home servers come back up, the packets are forwarded,
+	#  and the detail file processed as normal.
+	accounting {
+		# You may want accounting policies here...
+
+		update control {
+			Proxy-To-Realm := "acct_realm.example.com"
+		}
+	}
+
+}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/soh b/src/test/setup/radius-config/freeradius/sites-available/soh
new file mode 100644
index 0000000..9196e5b
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/soh
@@ -0,0 +1,34 @@
+# This is a simple server for the MS SoH requests generated by the
+# peap module - see "eap.conf" for more info
+
+# Requests are ONLY passed through the authorize section, and cannot
+# currently be proxied (in any event, the RADIUS attributes used are
+# internal).
+
+server soh-server {
+	authorize {
+		if (SoH-Supported == no) {
+			# client NAKed our request for SoH - not supported, or turned off
+			update config {
+				Auth-Type = Accept
+			}
+		}
+		else {
+			# client replied; check something - this is a local policy issue!
+			if (SoH-MS-Windows-Health-Status =~ /antivirus (warn|error) /) {
+				update config {
+					Auth-Type = Reject
+				}
+				update reply {
+					Reply-Message = "You must have antivirus enabled & installed!"
+				}
+			}
+			else {
+				update config {
+					Auth-Type = Accept
+				}
+			}
+		}
+	}
+}
+
diff --git a/src/test/setup/radius-config/freeradius/sites-available/status b/src/test/setup/radius-config/freeradius/sites-available/status
new file mode 100644
index 0000000..5432203
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/status
@@ -0,0 +1,127 @@
+# -*- text -*-
+######################################################################
+#
+#	A virtual server to handle ONLY Status-Server packets.
+#
+#	Server statistics can be queried with a properly formatted
+#	Status-Server request.  See dictionary.freeradius for comments.
+#
+#	If radiusd.conf has "status_server = yes", then any client
+#	will be able to send a Status-Server packet to any port
+#	(listen section type "auth", "acct", or "status"), and the
+#	server will respond.
+#
+#	If radiusd.conf has "status_server = no", then the server will
+#	ignore Status-Server packets to "auth" and "acct" ports.  It
+#	will respond only if the Status-Server packet is sent to a
+#	"status" port.
+#
+#	The server statistics are available ONLY on socket of type
+#	"status".  Queries for statistics sent to any other port
+#	are ignored.
+#
+#	Similarly, a socket of type "status" will not process
+#	authentication or accounting packets.  This is for security.
+#
+#	$Id: e7d4346310b837d56bffe4c991b4e5680742ebc0 $
+#
+######################################################################
+
+server status {
+	listen {
+		#  ONLY Status-Server is allowed to this port.
+		#  ALL other packets are ignored.
+		type = status
+
+		ipaddr = 127.0.0.1
+		port = 18121
+	}
+
+	#
+	#  We recommend that you list ONLY management clients here.
+	#  i.e. NOT your NASes or Access Points, and for an ISP,
+	#  DEFINITELY not any RADIUS servers that are proxying packets
+	#  to you.
+	#
+	#  If you do NOT list a client here, then any client that is
+	#  globally defined (i.e. all of them) will be able to query
+	#  these statistics.
+	#
+	#  Do you really want your partners seeing the internal details
+	#  of what your RADIUS server is doing?
+	#
+	client admin {
+		ipaddr = 127.0.0.1
+		secret = adminsecret
+	}
+
+	#
+	#  Simple authorize section.  The "Autz-Type Status-Server"
+	#  section will work here, too.  See "raddb/sites-available/default".
+	authorize {
+		ok
+
+		# respond to the Status-Server request.
+		Autz-Type Status-Server {
+			ok
+		}
+	}
+}
+
+#	Statistics can be queried via a number of methods:
+#
+#	All packets received/sent by the server (1 = auth, 2 = acct)
+#		FreeRADIUS-Statistics-Type = 3
+#
+#	All packets proxied by the server (4 = proxy-auth, 8 = proxy-acct)
+#		FreeRADIUS-Statistics-Type = 12
+#
+#	All packets sent && received:
+#		FreeRADIUS-Statistics-Type = 15
+#
+#	Internal server statistics:
+#		FreeRADIUS-Statistics-Type = 16
+#
+#	All packets for a particular client (globally defined)
+#		FreeRADIUS-Statistics-Type = 35
+#		FreeRADIUS-Stats-Client-IP-Address = 192.0.2.1
+#
+#	All packets for a client attached to a "listen" ip/port
+#		FreeRADIUS-Statistics-Type = 35
+#		FreeRADIUS-Stats-Client-IP-Address = 192.0.2.1
+#		FreeRADIUS-Stats-Server-IP-Address = 127.0.0.1
+#		FreeRADIUS-Stats-Server-Port = 1812
+#
+#	All packets for a "listen" IP/port
+#		FreeRADIUS-Statistics-Type = 67
+#		FreeRADIUS-Stats-Server-IP-Address = 127.0.0.1
+#		FreeRADIUS-Stats-Server-Port = 1812
+#
+#	All packets for a home server IP / port
+#		FreeRADIUS-Statistics-Type = 131
+#		FreeRADIUS-Stats-Server-IP-Address = 192.0.2.2
+#		FreeRADIUS-Stats-Server-Port = 1812
+
+#
+#  You can also get exponentially weighted moving averages of
+#  response times (in usec) of home servers.  Just set the config
+#  item "historic_average_window" in a home_server section.
+#
+#  By default it is zero (don't calculate it).  Useful values
+#  are between 100 and 10,000.  The server will calculate and
+#  remember the moving average for this window, and for 10 times
+#  that window.
+#
+
+#
+#  Some of this could have been simplified.  e.g. the proxy-auth and
+#  proxy-acct bits aren't completely necessary.  But using them permits
+#  the server to be queried for ALL inbound && outbound packets at once.
+#  This gives a good snapshot of what the server is doing.
+#
+#  Due to internal limitations, the statistics might not be exactly up
+#  to date.  Do not expect all of the numbers to add up perfectly.
+#  The Status-Server packets are also counted in the total requests &&
+#  responses.  The responses are counted only AFTER the response has
+#  been sent.
+#
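
The FreeRADIUS-Statistics-Type values quoted above are bitmasks built from per-category bits. The short Python sketch below restates that arithmetic; the 32/64/128 bits for the client, listener and home-server scopes are inferred from the totals 35, 67 and 131 shown in the comments, so treat them as an assumption.

```
# Bits named in the comments above (1 = auth, 2 = acct, 4 = proxy-auth,
# 8 = proxy-acct, 16 = internal); 32/64/128 are inferred from the totals.
AUTH, ACCT, PROXY_AUTH, PROXY_ACCT, INTERNAL = 1, 2, 4, 8, 16
CLIENT, LISTENER, HOME_SERVER = 32, 64, 128

print(AUTH | ACCT)                              # 3   - all packets received/sent
print(PROXY_AUTH | PROXY_ACCT)                  # 12  - all packets proxied
print(AUTH | ACCT | PROXY_AUTH | PROXY_ACCT)    # 15  - everything sent && received
print(INTERNAL)                                 # 16  - internal server statistics
print(CLIENT | AUTH | ACCT)                     # 35  - per-client statistics
print(LISTENER | AUTH | ACCT)                   # 67  - per-"listen" ip/port statistics
print(HOME_SERVER | AUTH | ACCT)                # 131 - per-home-server statistics
```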
diff --git a/src/test/setup/radius-config/freeradius/sites-available/tls b/src/test/setup/radius-config/freeradius/sites-available/tls
new file mode 100644
index 0000000..0874951
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/tls
@@ -0,0 +1,506 @@
+######################################################################
+#
+#  Initial implementation of RADIUS over TLS (radsec)
+#
+######################################################################
+
+listen {
+	ipaddr = *
+	port = 2083
+
+	#
+	#  TCP and TLS sockets can accept Access-Request and
+	#  Accounting-Request on the same socket.
+	#
+	#	auth	  = only Access-Request
+	#	acct	  = only Accounting-Request
+	#	auth+acct = both
+	#
+	type = auth+acct
+
+	# For now, only TCP transport is allowed.
+	proto = tcp
+
+	# Send packets to the default virtual server
+	virtual_server = default
+
+	clients = radsec
+
+	#
+	#  Connection limiting for sockets with "proto = tcp".
+	#
+	limit {
+	      #
+	      #  Limit the number of simultaneous TCP connections to the socket
+	      #
+	      #  The default is 16.
+	      #  Setting this to 0 means "no limit"
+	      max_connections = 16
+
+	      #  The per-socket "max_requests" option does not exist.
+
+	      #
+	      #  The lifetime, in seconds, of a TCP connection.  After
+	      #  this lifetime, the connection will be closed.
+	      #
+	      #  Setting this to 0 means "forever".
+	      lifetime = 0
+
+	      #
+	      #  The idle timeout, in seconds, of a TCP connection.
+	      #  If no packets have been received over the connection for
+	      #  this time, the connection will be closed.
+	      #
+	      #  Setting this to 0 means "no timeout".
+	      #
+	      #  We STRONGLY RECOMMEND that you set an idle timeout.
+	      #
+	      idle_timeout = 30
+	}
+
+	#  This is *exactly* the same configuration as used by the EAP-TLS
+	#  module.  It's OK for testing, but for production use it's a good
+	#  idea to use different server certificates for EAP and for RADIUS
+	#  transport.
+	#
+	#  If you want only one TLS configuration for multiple sockets,
+	#  then we suggest putting "tls { ...}" into radiusd.conf.
+	#  The subsection below can then be changed into a reference:
+	#
+	#	tls = ${tls}
+	#
+	#  Which means "the tls sub-section is not here, but instead is in
+	#  the top-level section called 'tls'".
+	#
+	#  If you have multiple tls configurations, you can put them into
+	#  sub-sections of a top-level "tls" section.  There's no need to
+	#  call them all "tls".  You can then use:
+	#
+	#	tls = ${tls.site1}
+	#
+	#  to refer to the "site1" sub-section of the "tls" section.
+	#
+	tls {
+		private_key_password = whatever
+		private_key_file = ${certdir}/server.pem
+
+		#  If Private key & Certificate are located in
+		#  the same file, then private_key_file &
+		#  certificate_file must contain the same file
+		#  name.
+		#
+		#  If ca_file (below) is not used, then the
+		#  certificate_file below MUST include not
+		#  only the server certificate, but ALSO all
+		#  of the CA certificates used to sign the
+		#  server certificate.
+		certificate_file = ${certdir}/server.pem
+
+		#  Trusted Root CA list
+		#
+		#  ALL of the CA's in this list will be trusted
+		#  to issue client certificates for authentication.
+		#
+		#  In general, you should use self-signed
+		#  certificates for 802.1x (EAP) authentication.
+		#  In that case, this CA file should contain
+		#  *one* CA certificate.
+		#
+		#  This parameter is used only for EAP-TLS,
+		#  when you issue client certificates.  If you do
+		#  not use client certificates, and you do not want
+		#  to permit EAP-TLS authentication, then delete
+		#  this configuration item.
+		ca_file = ${cadir}/ca.pem
+
+		#
+		#  For DH cipher suites to work, you have to
+		#  run OpenSSL to create the DH file first:
+		#
+		#  	openssl dhparam -out certs/dh 1024
+		#
+		dh_file = ${certdir}/dh
+
+		#
+		#  If your system doesn't have /dev/urandom,
+		#  you will need to create this file, and
+		#  periodically change its contents.
+		#
+		#  For security reasons, FreeRADIUS doesn't
+		#  write to files in its configuration
+		#  directory.
+		#
+#		random_file = ${certdir}/random
+
+		#
+		#  The default fragment size is 1K.
+		#  However, it's possible to send much more data than
+		#  that over a TCP connection.  The upper limit is 64K.
+		#  Setting the fragment size to more than 1K means that
+		#  there are fewer round trips when setting up a TLS
+		#  connection.  But only if the certificates are large.
+		#
+		fragment_size = 8192
+
+		#  include_length is a flag which is
+		#  by default set to yes.  If set to
+		#  yes, Total Length of the message is
+		#  included in EVERY packet we send.
+		#  If set to no, Total Length of the
+		#  message is included ONLY in the
+		#  first packet of a fragment series.
+		#
+	#	include_length = yes
+
+		#  Check the Certificate Revocation List
+		#
+		#  1) Copy CA certificates and CRLs to the same directory.
+		#  2) Execute 'c_rehash <CA certs&CRLs Directory>'.
+		#    'c_rehash' is an OpenSSL command.
+		#  3) Uncomment the line below.
+		#  4) Restart radiusd
+	#	check_crl = yes
+		ca_path = ${cadir}
+
+	       #
+	       #  If check_cert_issuer is set, the value will
+	       #  be checked against the DN of the issuer in
+	       #  the client certificate.  If the values do not
+	       #  match, the certificate verification will fail,
+	       #  rejecting the user.
+	       #
+	       #  In 2.1.10 and later, this check can be done
+	       #  more generally by checking the value of the
+	       #  TLS-Client-Cert-Issuer attribute.  This check
+	       #  can be done via any mechanism you choose.
+	       #
+	#       check_cert_issuer = "/C=GB/ST=Berkshire/L=Newbury/O=My Company Ltd"
+
+	       #
+	       #  If check_cert_cn is set, the value will
+	       #  be xlat'ed and checked against the CN
+	       #  in the client certificate.  If the values
+	       #  do not match, the certificate verification
+	       #  will fail, rejecting the user.
+	       #
+	       #  This check is done only if the previous
+	       #  "check_cert_issuer" is not set, or if
+	       #  the check succeeds.
+	       #
+	       #  In 2.1.10 and later, this check can be done
+	       #  more generally by checking the value of the
+	       #  TLS-Client-Cert-CN attribute.  This check
+	       #  can be done via any mechanism you choose.
+	       #
+	#	check_cert_cn = %{User-Name}
+	#
+		# Set this option to specify the allowed
+		# TLS cipher suites.  The format is listed
+		# in "man 1 ciphers".
+		cipher_list = "DEFAULT"
+
+		#
+
+		#  This configuration entry should be deleted
+		#  once the server is running in a normal
+		#  configuration.  It is here ONLY to make
+		#  initial deployments easier.
+		#
+		#
+		#  This is enabled in eap.conf, so we don't need it here.
+		#
+#		make_cert_command = "${certdir}/bootstrap"
+
+		#
+		#  Session resumption / fast reauthentication
+		#  cache.
+		#
+		#  The cache contains the following information:
+		#
+		#  session Id - unique identifier, managed by SSL
+		#  User-Name  - from the Access-Accept
+		#  Stripped-User-Name - from the Access-Request
+		#  Cached-Session-Policy - from the Access-Accept
+		#
+		#  The "Cached-Session-Policy" is the name of a
+		#  policy which should be applied to the cached
+		#  session.  This policy can be used to assign
+		#  VLANs, IP addresses, etc.  It serves as a useful
+		#  way to re-apply the policy from the original
+		#  Access-Accept to the subsequent Access-Accept
+		#  for the cached session.
+		#
+		#  On session resumption, these attributes are
+		#  copied from the cache, and placed into the
+		#  reply list.
+		#
+		#  You probably also want "use_tunneled_reply = yes"
+		#  when using fast session resumption.
+		#
+		cache {
+		      #
+		      #  Enable it.  The default is "no".
+		      #  Deleting the entire "cache" subsection
+		      #  also disables caching.
+		      #
+		      #  You can disallow resumption for a
+		      #  particular user by adding the following
+		      #  attribute to the control item list:
+		      #
+		      #		Allow-Session-Resumption = No
+		      #
+		      #  If "enable = no" below, you CANNOT
+		      #  enable resumption for just one user
+		      #  by setting the above attribute to "yes".
+		      #
+		      enable = no
+
+		      #
+		      #  Lifetime of the cached entries, in hours.
+		      #  The sessions will be deleted after this
+		      #  time.
+		      #
+		      lifetime = 24 # hours
+
+		      #
+		      #  The maximum number of entries in the
+		      #  cache.  Set to "0" for "infinite".
+		      #
+		      #  This could be set to the number of users
+		      #  who are logged in... which can be a LOT.
+		      #
+		      max_entries = 255
+
+		      #
+		      #  Internal "name" of the session cache.
+		      #  Used to distinguish which TLS context
+		      #  sessions belong to.
+		      #
+		      #  The server will generate a random value
+		      #  if unset. This will change across server
+		      #  restarts, so you MUST set the "name" if you
+		      #  want to persist sessions (see below).
+		      #
+		      #  If you use IPv6, change the "ipaddr" below
+		      #  to "ipv6addr"
+		      #
+		      #name = "TLS ${..ipaddr} ${..port} ${..proto}"
+
+		      #
+		      #  Simple directory-based storage of sessions.
+		      #  Two files per session will be written, the SSL
+		      #  state and the cached VPs. This will persist sessions
+		      #  across server restarts.
+		      #
+		      #  The server will need write perms, and the directory
+		      #  should be secured from anyone else. You might want
+		      #  a script to remove old files from here periodically:
+		      #
+		      #    find ${logdir}/tlscache -mtime +2 -exec rm -f {} \;
+		      #
+		      #  This feature REQUIRES the "name" option to be set above.
+		      #
+		      #persist_dir = "${logdir}/tlscache"
+		}
+
+		#
+		#  Require a client certificate.
+		#
+		require_client_cert = yes
+
+		#
+		#  As of version 2.1.10, client certificates can be
+		#  validated via an external command.  This allows
+		#  dynamic CRLs or OCSP to be used.
+		#
+		#  This configuration is commented out in the
+		#  default configuration.  Uncomment it, and configure
+		#  the correct paths below to enable it.
+		#
+		verify {
+			#  A temporary directory where the client
+			#  certificates are stored.  This directory
+			#  MUST be owned by the UID of the server,
+			#  and MUST not be accessible by any other
+			#  users.  When the server starts, it will do
+			#  "chmod go-rwx" on the directory, for
+			#  security reasons.  The directory MUST
+			#  exist when the server starts.
+			#
+			#  You should also delete all of the files
+			#  in the directory when the server starts.
+	#     		tmpdir = /tmp/radiusd
+
+			#  The command used to verify the client cert.
+			#  We recommend using the OpenSSL command-line
+			#  tool.
+			#
+			#  The ${..ca_path} text is a reference to
+			#  the ca_path variable defined above.
+			#
+			#  The %{TLS-Client-Cert-Filename} is the name
+			#  of the temporary file containing the cert
+			#  in PEM format.  This file is automatically
+			#  deleted by the server when the command
+			#  returns.
+	#    		client = "/path/to/openssl verify -CApath ${..ca_path} %{TLS-Client-Cert-Filename}"
+		}
+	}
+}
+
+clients radsec {
+	client 127.0.0.1 {
+		ipaddr = 127.0.0.1
+
+		#
+		#  Ensure that this client is TLS *only*.
+		#
+		proto = tls
+
+		#
+		#  TCP clients can have any shared secret.
+		#
+		#  TLS clients MUST have the shared secret
+		#  set to "radsec".  Or, for "proto = tls",
+		#  you can omit the secret, and it will
+		#  automatically be set to "radsec".
+		#
+		secret = radsec
+
+		#
+		#  You can also use a "limit" section here.
+		#  See raddb/clients.conf for examples.
+		#
+		#  Note that BOTH limits are applied.  You
+		#  should therefore set the "listen" limits
+		#  higher than the ones for each individual
+		#  client.
+		#
+	}
+}
+
+home_server tls {
+	ipaddr = 127.0.0.1
+	port = 2083
+	type = auth
+	secret = testing123
+	proto = tcp
+	status_check = none
+
+	tls {
+		private_key_password = whatever
+		private_key_file = ${certdir}/client.pem
+
+		#  If Private key & Certificate are located in
+		#  the same file, then private_key_file &
+		#  certificate_file must contain the same file
+		#  name.
+		#
+		#  If ca_file (below) is not used, then the
+		#  certificate_file below MUST include not
+		#  only the server certificate, but ALSO all
+		#  of the CA certificates used to sign the
+		#  server certificate.
+		certificate_file = ${certdir}/client.pem
+
+		#  Trusted Root CA list
+		#
+		#  ALL of the CA's in this list will be trusted
+		#  to issue client certificates for authentication.
+		#
+		#  In general, you should use self-signed
+		#  certificates for 802.1x (EAP) authentication.
+		#  In that case, this CA file should contain
+		#  *one* CA certificate.
+		#
+		#  This parameter is used only for EAP-TLS,
+		#  when you issue client certificates.  If you do
+		#  not use client certificates, and you do not want
+		#  to permit EAP-TLS authentication, then delete
+		#  this configuration item.
+		ca_file = ${cadir}/ca.pem
+
+		#
+		#  For DH cipher suites to work, you have to
+		#  run OpenSSL to create the DH file first:
+		#
+		#  	openssl dhparam -out certs/dh 1024
+		#
+		dh_file = ${certdir}/dh
+		random_file = ${certdir}/random
+
+		#
+		#  The default fragment size is 1K.
+		#  However, TLS can send 64K of data at once.
+		#  It can be useful to set it higher.
+		#
+		fragment_size = 8192
+
+		#  include_length is a flag which is
+		#  by default set to yes.  If set to
+		#  yes, Total Length of the message is
+		#  included in EVERY packet we send.
+		#  If set to no, Total Length of the
+		#  message is included ONLY in the
+		#  first packet of a fragment series.
+		#
+	#	include_length = yes
+
+		#  Check the Certificate Revocation List
+		#
+		#  1) Copy CA certificates and CRLs to the same directory.
+		#  2) Execute 'c_rehash <CA certs&CRLs Directory>'.
+		#    'c_rehash' is an OpenSSL command.
+		#  3) Uncomment the line below.
+		#  4) Restart radiusd
+	#	check_crl = yes
+		ca_path = ${cadir}
+
+	       #
+	       #  If check_cert_issuer is set, the value will
+	       #  be checked against the DN of the issuer in
+	       #  the client certificate.  If the values do not
+	       #  match, the certificate verification will fail,
+	       #  rejecting the user.
+	       #
+	       #  In 2.1.10 and later, this check can be done
+	       #  more generally by checking the value of the
+	       #  TLS-Client-Cert-Issuer attribute.  This check
+	       #  can be done via any mechanism you choose.
+	       #
+	#       check_cert_issuer = "/C=GB/ST=Berkshire/L=Newbury/O=My Company Ltd"
+
+	       #
+	       #  If check_cert_cn is set, the value will
+	       #  be xlat'ed and checked against the CN
+	       #  in the client certificate.  If the values
+	       #  do not match, the certificate verification
+	       #  will fail, rejecting the user.
+	       #
+	       #  This check is done only if the previous
+	       #  "check_cert_issuer" is not set, or if
+	       #  the check succeeds.
+	       #
+	       #  In 2.1.10 and later, this check can be done
+	       #  more generally by checking the value of the
+	       #  TLS-Client-Cert-CN attribute.  This check
+	       #  can be done via any mechanism you choose.
+	       #
+	#	check_cert_cn = %{User-Name}
+	#
+		# Set this option to specify the allowed
+		# TLS cipher suites.  The format is listed
+		# in "man 1 ciphers".
+		cipher_list = "DEFAULT"
+	}
+
+}
+
+home_server_pool tls {
+		 type = fail-over
+		 home_server = tls
+}
+
+realm tls {
+      auth_pool = tls
+}
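
Because the listener above sets `require_client_cert = yes`, a RadSec peer has to complete a mutually authenticated TLS handshake on port 2083 before any RADIUS traffic flows. The following Python sketch only checks that the TLS layer comes up against this configuration; it does not speak RADIUS over the stream. The certificate paths are assumptions standing in for `${certdir}/client.pem` and `${cadir}/ca.pem`, while the pass phrase "whatever" and the fixed "radsec" secret come from the config itself.

```
import socket
import ssl

# Hypothetical paths; point these at the files referenced by the tls sections above.
CA_FILE = "ca.pem"         # ${cadir}/ca.pem
CLIENT_PEM = "client.pem"  # ${certdir}/client.pem holds both key and certificate

ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=CA_FILE)
ctx.check_hostname = False  # the test certificates need not carry a matching CN/SAN
ctx.load_cert_chain(CLIENT_PEM, CLIENT_PEM, password="whatever")

sock = socket.create_connection(("127.0.0.1", 2083), timeout=5)
tls = ctx.wrap_socket(sock)
# Reaching this point means both sides accepted each other's certificates;
# RADIUS packets would now be exchanged using the shared secret "radsec".
print("TLS established, cipher: %s" % str(tls.cipher()))
tls.close()
```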
diff --git a/src/test/setup/radius-config/freeradius/sites-available/virtual.example.com b/src/test/setup/radius-config/freeradius/sites-available/virtual.example.com
new file mode 100644
index 0000000..b78a520
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/virtual.example.com
@@ -0,0 +1,26 @@
+# -*- text -*-
+######################################################################
+#
+#	Sample virtual server for internally proxied requests.
+#
+#	See the "realm virtual.example.com" example in "proxy.conf".
+#
+#	$Id: 211daab3af0161aefa4990b137ba2739257f8326 $
+#
+######################################################################
+
+#
+#  Sample contents: just do everything that the default configuration does.
+#
+#  You WILL want to edit this to your local needs.  We suggest copying
+#  the "default" file here, and then editing it.  That way, any
+#  changes to the "default" file will not affect this virtual server,
+#  and vice-versa.
+#
+#  When this virtual server receives the request, the original
+#  attributes can be accessed as "outer.request", "outer.control", etc.
+#  See "man unlang" for more details.
+#
+server virtual.example.com {
+$INCLUDE	${confdir}/sites-available/default
+}
diff --git a/src/test/setup/radius-config/freeradius/sites-available/vmps b/src/test/setup/radius-config/freeradius/sites-available/vmps
new file mode 100644
index 0000000..64d5e93
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-available/vmps
@@ -0,0 +1,98 @@
+# -*- text -*-
+######################################################################
+#
+#	As of version 2.0.0, the server also supports the VMPS
+#	protocol.
+#
+#	$Id: 8703902cafb5cc2b869dc42da9f554da313825ad $
+#
+######################################################################
+
+server vmps {
+	listen {
+		# VMPS sockets only support IPv4 addresses.
+		ipaddr = *
+
+		#  Port on which to listen.
+		#  Allowed values are:
+		#	integer port number
+		#	1589 is the default VMPS port.
+		port = 1589
+
+		#  Type of packets to listen for.  Here, it is VMPS.
+		type = vmps
+
+		#  Some systems support binding to an interface, in addition
+		#  to the IP address.  This feature isn't strictly necessary,
+		#  but for sites with many IP addresses on one interface,
+		#  it's useful to say "listen on all addresses for
+		#  eth0".
+		#
+		#  If your system does not support this feature, you will
+		#  get an error if you try to use it.
+		#
+		#	interface = eth0
+	}
+
+	#  If you have switches that are allowed to send VMPS, but NOT
+	#  RADIUS packets, then list them here as "client" sections.
+	#
+	#  Note that for compatibility with RADIUS, you still have to
+	#  list a "secret" for each client, though that secret will not
+	#  be used for anything.
+
+
+	#  And the REAL contents.  This section is just like the
+	#  "post-auth" section of radiusd.conf.  In fact, it calls the
+	#  "post-auth" component of the modules that are listed here.
+	#  But it's called "vmps" to highlight that it's for VMPS.
+	#
+	vmps {
+		#
+		#  Some requests may not have a MAC address.  Try to
+		#  create one using other attributes.
+		if (!VMPS-Mac) {
+			if (VMPS-Ethernet-Frame =~ /0x.{12}(..)(..)(..)(..)(..)(..).*/) {
+				update request {
+					VMPS-Mac = "%{1}:%{2}:%{3}:%{4}:%{5}:%{6}"
+				}
+			}
+			else {
+				update request {
+					VMPS-Mac = "%{VMPS-Cookie}"
+				}
+			}
+		}
+
+		#  Do a simple mapping of MAC to VLAN.
+		#
+		#  See radiusd.conf for the definition of the "mac2vlan"
+		#  module.
+		#
+		#mac2vlan
+
+		# required VMPS reply attributes
+		update reply {
+			VMPS-Packet-Type = VMPS-Join-Response
+			VMPS-Cookie = "%{VMPS-Mac}"
+
+			VMPS-VLAN-Name = "please_use_real_vlan_here"
+
+			#
+			#  If you have VLANs in a database, you can select
+			#  the VLAN name based on the MAC address.
+			#
+			#VMPS-VLAN-Name = "%{sql:select ... where mac='%{VMPS-Mac}'}"
+		}
+
+		# correct reply packet type for reconfirmation requests
+		#
+		if (VMPS-Packet-Type == VMPS-Reconfirm-Request){
+			update reply {
+				VMPS-Packet-Type := VMPS-Reconfirm-Response
+			}
+		}
+	}
+
+	# Proxying of VMPS requests is NOT supported.
+}
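
The fallback above derives a MAC address by slicing six octets out of the hex dump of VMPS-Ethernet-Frame (skip "0x" and the 12 hex digits of the destination MAC, then take the next six octet pairs). A small Python sketch of the same transformation; the sample frame value is invented for illustration:

```
import re

# Same pattern as the unlang rule above.
FRAME_RE = re.compile(r"0x.{12}(..)(..)(..)(..)(..)(..).*")

def vmps_mac(ethernet_frame_hex):
    """Return the source MAC in aa:bb:cc:dd:ee:ff form, or None if no match."""
    m = FRAME_RE.match(ethernet_frame_hex)
    if not m:
        return None
    return ":".join(m.groups())

# Hypothetical frame: dst ff:ff:ff:ff:ff:ff, src 00:11:22:33:44:55, then payload.
print(vmps_mac("0xffffffffffff001122334455080045"))  # -> 00:11:22:33:44:55
```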
diff --git a/src/test/setup/radius-config/freeradius/sites-enabled/default b/src/test/setup/radius-config/freeradius/sites-enabled/default
new file mode 120000
index 0000000..6d9ba33
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-enabled/default
@@ -0,0 +1 @@
+../sites-available/default
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/sites-enabled/inner-tunnel b/src/test/setup/radius-config/freeradius/sites-enabled/inner-tunnel
new file mode 120000
index 0000000..55aba6e
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/sites-enabled/inner-tunnel
@@ -0,0 +1 @@
+../sites-available/inner-tunnel
\ No newline at end of file
diff --git a/src/test/setup/radius-config/freeradius/start-radius.py b/src/test/setup/radius-config/freeradius/start-radius.py
new file mode 100755
index 0000000..6ef7726
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/start-radius.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+import pexpect
+import time
+child = pexpect.spawn('sh -c radius')  # launch the radius server under a shell
+child.expect('Enter PEM pass phrase:')  # wait for the private-key pass phrase prompt
+child.sendline('whatever')  # answer with the pass phrase used by the test certificates
+while True:
+    time.sleep(3600)  # idle forever so the process (and its container) stays up
diff --git a/src/test/setup/radius-config/freeradius/templates.conf b/src/test/setup/radius-config/freeradius/templates.conf
new file mode 100644
index 0000000..22c0a09
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/templates.conf
@@ -0,0 +1,108 @@
+# -*- text -*-
+##
+## templates.conf -- configurations to be used in multiple places
+##
+##	$Id: 7b8b44e051c974c1a0a6e27a0cff50e621835df2 $
+
+######################################################################
+#
+#  Version 2.0 has a useful new feature called "templates".
+#
+#  Use templates by adding a line in radiusd.conf:
+#
+#	$INCLUDE templates.conf
+#
+#  The goal of the templates is to have common configuration located
+#  in this file, and to list only the *differences* in the individual
+#  sections.  This feature is most useful for sections like "clients"
+#  or "home_servers", where many may be defined, and each one has
+#  similar repeated configuration.
+#
+#  Something similar to templates can be done by putting common
+#  configuration into separate files, and using "$INCLUDE file...",
+#  but this is more flexible, and simpler to understand.  It's also
+#  cheaper for the server, because "$INCLUDE" makes a copy of the
+#  configuration for inclusion, and templates are simply referenced.
+#
+#  The templates are defined in the "templates" section, so that they
+#  do not affect the rest of the server configuration.
+#
+#  A section can reference a template by using "$template name"
+#
+templates {
+	#
+	#  The contents of the templates section are other
+	#  configuration sections that would normally go into
+	#  the configuration files.
+	#
+
+	#
+	#  This is a default template for the "home_server" section.
+	#  Note that there is no name for the section.
+	#
+	#  Any configuration item that is valid for a "home_server"
+	#  section is also valid here.  When a "home_server" section
+	#  is defined in proxy.conf, this section is referenced as
+	#  the template.
+	#
+	#  Configuration items that are explicitly listed in a
+	#  "home_server" section of proxy.conf are used in
+	#  preference to the configuration items listed here.
+	#
+	#  However, if a configuration item is NOT listed in a
+	#  "home_server" section of proxy.conf, then the value here
+	#  is used.
+	#
+	#  This functionality lets you put common configuration into
+	#  a template, and to put only the unique configuration
+	#  items in "proxy.conf".  Each section in proxy.conf can
+	#  then contain a line "$template home_server", which will
+	#  cause it to reference this template.
+	#
+	home_server {
+		response_window = 20
+		zombie_period = 40
+		revive_interval = 120
+		#
+		#  Etc.
+	}
+
+	#
+	#  You can also have named templates.  For example, if you
+	#  are proxying to 3 different home servers all at the same
+	#  site, with identical configurations (other than IP
+	#  addresses), you can use this named template.
+	#
+
+	#  Then, each "home_server" section in "proxy.conf" would
+	#  only list the IP address of that home server, and a
+	#  line saying
+	#
+	#		$template example_com
+	#
+	#  That would tell FreeRADIUS to look in the section below
+	#  for the rest of the configuration items.
+	#
+	#  For various reasons, you shouldn't have a "." in the template
+	#  name.  Doing so means that the server will be unable to find
+	#  the template.
+	#
+	example_com {
+		type = auth
+		port = 1812
+		secret = testing123
+		response_window = 20
+		#
+		# Etc...
+	}
+
+	#
+	#  You can have templates for other sections, too, but they
+	#  seem to be most useful for home_servers.
+	#
+	#  For now, you can use templates only for sections in
+	#  radiusd.conf, not sub-sections.  So you still have to use
+	#  the "$INCLUDE file.." method for things like defining
+	#  multiple "sql" modules, each with similar configuration.
+	#
+}
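
The precedence rule described above (items set explicitly in a `home_server` section win; anything missing falls back to the template) behaves like a dictionary merge. A rough Python model, using the example_com values from this file and a hypothetical per-server section from proxy.conf:

```
# Values copied from the example_com template above.
example_com_template = {
    "type": "auth",
    "port": 1812,
    "secret": "testing123",
    "response_window": 20,
}

# Hypothetical proxy.conf section: only the unique items are listed there,
# together with a "$template example_com" line.
home_server_one = {
    "ipaddr": "192.0.2.10",
    "response_window": 30,  # explicitly listed, so it overrides the template
}

# Template values apply only where the section does not set its own.
effective = dict(example_com_template)
effective.update(home_server_one)
print(effective)
# {'type': 'auth', 'port': 1812, 'secret': 'testing123',
#  'response_window': 30, 'ipaddr': '192.0.2.10'}
```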
diff --git a/src/test/setup/radius-config/freeradius/trigger.conf b/src/test/setup/radius-config/freeradius/trigger.conf
new file mode 100644
index 0000000..77ca355
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/trigger.conf
@@ -0,0 +1,260 @@
+# -*- text -*-
+##
+## trigger.conf -- Events in the server can trigger a hook to be executed.
+##
+##	$Id: 5cbe8d7d8a09549c060748a582cd6ed359e0e999 $
+
+#
+#  The triggers are named as "type.subtype.value".  These names refer
+#  to subsections and then configuration items in the "trigger"
+#  section below.  When an event occurs, the trigger is executed.  The
+#  trigger is simply a program that is run, with optional arguments.
+#
+#  The server does not wait when a trigger is executed.  It is simply
+#  a "one-shot" event that is sent.
+#
+#  The trigger names should be self-explanatory.
+#
+
+#
+#  SNMP configuration.
+#
+#  For now, this is only for SNMP traps.
+#
+#  They are enabled by uncommenting (or adding) "$INCLUDE trigger.conf"
+#  in the main "radiusd.conf" file.
+#
+#  The traps *REQUIRE* that the files in the "mibs" directory be copied
+#  to the global mibs directory, usually /usr/share/snmp/mibs/.
+#  If this is not done, the "snmptrap" program has no idea what information
+#  to send, and will not work.  The MIB installation is *NOT* done as
+#  part of the default installation, so that step *MUST* be done manually.
+#
+#  The global MIB directory can be found by running the following command:
+#
+#	snmptranslate -Dinit_mib .1.3 2>&1 | grep MIBDIR | sed "s/' .*//;s/.* '//;s/.*://"
+#
+#  Or maybe just:
+#
+#	snmptranslate -Dinit_mib .1.3 2>&1 | grep MIBDIR
+#
+#  If you have copied the MIBs to that directory, you can test the
+#  FreeRADIUS MIBs by running the following command:
+#
+#	snmptranslate -m +FREERADIUS-NOTIFICATION-MIB -IR -On  serverStart
+#
+#  It should print out:
+#
+#	.1.3.6.1.4.1.11344.4.1.1
+#
+#  As always, run the server in debugging mode after enabling the
+#  traps.  You will see the "snmptrap" command being run, and it will
+#  print out any errors or issues that it encounters.  Those need to
+#  be fixed before running the server in daemon mode.
+#
+#  We also suggest running in debugging mode as the "radiusd" user, if
+#  you have "user/group" set in radiusd.conf.  The "snmptrap" program
+#  may behave differently when run as "root" or as the "radiusd" user.
+#
+snmp {
+	#
+	#  Configuration for SNMP traps / notifications
+	#
+	#  To disable traps, edit "radiusd.conf", and delete the line
+	#  which says "$INCLUDE trigger.conf"
+	#
+	trap {
+		#
+		#  Absolute path for the "snmptrap" command, and
+		#  default command-line arguments.
+		#
+		#  You can disable traps by changing the command to
+		#  "/bin/echo".
+		#
+		cmd = "/usr/bin/snmptrap -v2c"
+
+		#
+		#  Community string
+		#
+		community = "public"
+
+		#
+		#  Agent configuration.
+		#
+		agent = "localhost ''"
+	}
+}
+
+#
+#  The "snmptrap" configuration defines the full command used to run the traps.
+#
+#  This entry should not be edited.  Instead, edit the "trap" section above.
+#
+snmptrap = "${snmp.trap.cmd} -c ${snmp.trap.community} ${snmp.trap.agent} FREERADIUS-NOTIFICATION-MIB"
+
+#
+#  The individual triggers are defined here.  You can disable one by
+#  deleting it, or by commenting it out.  You can disable an entire
+#  section of traps by deleting the section.
+#
+#  The entries below should not be edited.  For example, the double colons
+#  *must* immediately follow the ${snmptrap} reference.  Adding a space
+#  before the double colons will break all SNMP traps.
+#
+#  However... the traps are just programs which are run when
+#  particular events occur.  If you want to replace a trap with
+#  another program, you can.  Just edit the definitions below, so that
+#  they run a program of your choice.
+#
+#  For example, you can leverage the "start/stop" triggers to run a
+#  program when the server starts, or when it stops.  But that will
+#  prevent the start/stop SNMP traps from working, of course.
+#
+trigger {
+	#
+	# Events in the server core
+	#
+	server {
+		# the server has just started
+		start = "${snmptrap}::serverStart"
+
+		# the server is about to stop
+		stop = "${snmptrap}::serverStop"
+
+		# The "max_requests" condition has been reached.
+		# This will trigger only once per 60 seconds.
+		max_requests = "${snmptrap}::serverMaxRequests"
+
+		# For events related to clients
+		client {
+			#  Added a new dynamic client
+			add = "/path/to/file %{Packet-Src-IP-Address}"
+
+			#  There is no event for when dynamic clients expire
+		}
+
+		# Events related to signals received.
+		signal {
+			# a HUP signal
+			hup = "${snmptrap}::signalHup"
+
+			# a TERM signal
+			term = "${snmptrap}::signalTerm"
+		}
+
+
+		# Events related to the thread pool
+		thread {
+		       # A new thread has been started
+		       start = "${snmptrap}::threadStart"
+
+		       # an existing thread has been stopped
+		       stop = "${snmptrap}::threadStop"
+
+		       # an existing thread is unresponsive
+		       unresponsive = "${snmptrap}::threadUnresponsive"
+
+		       # the "max_threads" limit has been reached
+		       max_threads = "${snmptrap}::threadMaxThreads"
+		}
+	}
+
+	# When a home server changes state.
+	# These traps are edge triggered.
+	home_server {
+		# common arguments: IP, port, identifier
+		args = "radiusAuthServerAddress a %{proxy-request:Packet-Dst-IP-Address} radiusAuthClientServerPortNumber i %{proxy-request:Packet-Dst-Port} radiusAuthServIdent s '%{home_server:instance}'"
+
+		# The home server has been marked "alive"
+		alive = "${snmptrap}::homeServerAlive ${args}"
+
+		# The home server has been marked "zombie"
+		zombie = "${snmptrap}::homeServerZombie ${args}"
+
+		# The home server has been marked "dead"
+		dead = "${snmptrap}::homeServerDead ${args}"
+	}
+
+	# When a pool of home servers changes state.
+	home_server_pool {
+		# common arguments
+		args = "radiusdConfigName s %{home_server:instance}"
+
+		# It has reverted to "normal" mode, where at least one
+		# home server is alive.
+		normal = "${snmptrap}::homeServerPoolNormal ${args}"
+
+		# It is in "fallback" mode, with all home servers "dead"
+		fallback = "${snmptrap}::homeServerPoolFallback ${args}"
+	}
+
+	#  Triggers for specific modules.  These are NOT in the module
+	#  configuration because they are global to all instances of the
+	#  module.  You can have module-specific triggers, by placing a
+	#  "trigger" subsection in the module configuration.
+	modules {
+		# Common arguments
+		args = "radiusdModuleName s 'ldap' radiusdModuleInstance s ''"
+
+		# The files module
+		files {
+			# The module has been HUP'd via radmin
+			hup = "${snmptrap}::serverModuleHup ${..args}"
+
+			# Note that "hup" can be used for every module
+			# which can be HUP'd via radmin
+		}
+
+		# The LDAP module
+		ldap {
+			# Failed to open a new connection to the DB
+			fail = "${snmptrap}::serverModuleConnectionFail ${..args}"
+
+			# There are no "open", "close", or "none" settings.
+			# This is because the LDAP module re-connects and closes
+			# the connection for every "bind as user" query.
+		}
+
+		# The SQL module
+		sql {
+			# A new connection to the DB has been opened
+			open = "${snmptrap}::serverModuleConnectionUp ${..args}"
+
+			# A connection to the DB has been closed
+			close = "${snmptrap}::serverModuleConnectionDown ${..args}"
+
+			# Failed to open a new connection to the DB
+			fail = "${snmptrap}::serverModuleConnectionFail ${..args}"
+
+			# There are no DB handles available.
+			none = "${snmptrap}::serverModuleConnectionNone ${..args}"
+		}
+	}
+}
+
+#
+#  The complete list of triggers as generated from the source code is below.
+#
+#  These are the ONLY traps which are generated.  You CANNOT add new traps
+#  by defining them in one of the sections above.  New traps can be created
+#  only by editing both the source code of the server *and* the MIBs.
+#  If you are not an expert in C and SNMP, then adding new traps will be
+#  difficult.
+#
+# home_server.alive
+# home_server.dead
+# home_server.zombie
+# home_server_pool.fallback
+# home_server_pool.normal
+# modules.*.hup
+# modules.ldap.fail
+# modules.sql.close
+# modules.sql.fail
+# modules.sql.none
+# modules.sql.open
+# server.client.add
+# server.max_requests
+# server.signal.hup
+# server.signal.term
+# server.start
+# server.stop
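
Each trigger value such as `${snmptrap}::serverStart` expands, through the references defined earlier in this file, into one complete `snmptrap` command line. The Python sketch below only performs that textual expansion for the `server.start` trigger so the final command is visible; it does not execute anything.

```
# Values copied from the snmp/trap section above.
cmd = "/usr/bin/snmptrap -v2c"
community = "public"
agent = "localhost ''"

# snmptrap = "${snmp.trap.cmd} -c ${snmp.trap.community} ${snmp.trap.agent} FREERADIUS-NOTIFICATION-MIB"
snmptrap = "%s -c %s %s FREERADIUS-NOTIFICATION-MIB" % (cmd, community, agent)

# start = "${snmptrap}::serverStart"  -- note there is no space before the double colons
start_trigger = snmptrap + "::serverStart"
print(start_trigger)
# /usr/bin/snmptrap -v2c -c public localhost '' FREERADIUS-NOTIFICATION-MIB::serverStart
```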
diff --git a/src/test/setup/radius-config/freeradius/users b/src/test/setup/radius-config/freeradius/users
new file mode 120000
index 0000000..7055798
--- /dev/null
+++ b/src/test/setup/radius-config/freeradius/users
@@ -0,0 +1 @@
+mods-config/files/authorize
\ No newline at end of file
diff --git a/src/test/setup/test_docker/Dockerfile b/src/test/setup/test_docker/Dockerfile
new file mode 100644
index 0000000..e5c72a7
--- /dev/null
+++ b/src/test/setup/test_docker/Dockerfile
@@ -0,0 +1,19 @@
+FROM ubuntu:14.04
+MAINTAINER chetan@ciena.com
+
+RUN apt-get update 
+RUN apt-get -y install git python python-pip python-setuptools python-scapy tcpdump doxygen doxypy
+RUN easy_install nose
+RUN apt-get -y install openvswitch-common openvswitch-switch
+WORKDIR /root
+RUN mkdir ovs
+COPY ./openvswitch-2.4.0.tar.gz /root
+COPY ./build_ovs.sh /root/
+RUN /root/build_ovs.sh
+RUN apt-get -y install python-twisted python-sqlite sqlite3
+RUN pip install scapy-ssl_tls
+RUN pip install -U scapy
+RUN pip install monotonic
+RUN mv /usr/sbin/tcpdump /sbin/
+RUN ln -sf /sbin/tcpdump /usr/sbin/tcpdump
+CMD ["/bin/bash"]
diff --git a/src/test/setup/test_docker/build_ovs.sh b/src/test/setup/test_docker/build_ovs.sh
new file mode 100755
index 0000000..093d8c5
--- /dev/null
+++ b/src/test/setup/test_docker/build_ovs.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+echo "OVS installation"
+cd /root/ && tar zxpvf openvswitch-2.4.0.tar.gz -C /root/ovs
+cd /root/ovs
+cd openvswitch-2.4.0 && ./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --disable-ssl && make && make install
+service openvswitch-controller stop
+service openvswitch-switch restart
diff --git a/src/test/setup/test_docker/openvswitch-2.4.0.tar.gz b/src/test/setup/test_docker/openvswitch-2.4.0.tar.gz
new file mode 100644
index 0000000..135022b
--- /dev/null
+++ b/src/test/setup/test_docker/openvswitch-2.4.0.tar.gz
Binary files differ
diff --git a/src/test/subscriber/__init__.py b/src/test/subscriber/__init__.py
new file mode 100644
index 0000000..a881eb6
--- /dev/null
+++ b/src/test/subscriber/__init__.py
@@ -0,0 +1,7 @@
+import os,sys
+##add the python path to lookup the utils
+working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
+utils_dir = os.path.join(working_dir, '../utils')
+fsm_dir = os.path.join(working_dir, '../fsm')
+__path__.append(utils_dir)
+__path__.append(fsm_dir)
diff --git a/src/test/subscriber/generate_portmap.py b/src/test/subscriber/generate_portmap.py
new file mode 100644
index 0000000..a4d8fb0
--- /dev/null
+++ b/src/test/subscriber/generate_portmap.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+##Generate a port map for 100 subscribers based on veth pairs
+import sys
+header = '''###This file is auto-generated. Do not EDIT###'''
+def generate_port_map(num = 100):
+    print("g_subscriber_port_map = {}")
+    for i in xrange(1, num+1):
+        intf = 'veth' + str(2*i-2)
+        print("g_subscriber_port_map[%d]='%s'" %(i, intf))
+        print("g_subscriber_port_map['%s']=%d" %(intf, i))
+
+if __name__ == '__main__':
+    num = 100
+    if len(sys.argv) > 1:
+        num = int(sys.argv[1])
+    print(header)
+    generate_port_map(num)
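
The generator maps subscriber index i to interface veth(2*i-2) and records the reverse lookup as well. An equivalent in-memory construction, handy as a sanity check against the generated portmaps.py:

```
def subscriber_port_map(num=100):
    """Build the same bidirectional index<->veth map that generate_port_map() prints."""
    port_map = {}
    for i in range(1, num + 1):
        intf = 'veth%d' % (2 * i - 2)
        port_map[i] = intf
        port_map[intf] = i
    return port_map

pm = subscriber_port_map(100)
assert pm[1] == 'veth0' and pm['veth198'] == 100
```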
diff --git a/src/test/subscriber/portmaps.py b/src/test/subscriber/portmaps.py
new file mode 100644
index 0000000..1e6f03c
--- /dev/null
+++ b/src/test/subscriber/portmaps.py
@@ -0,0 +1,402 @@
+###This file is auto-generated. Do not EDIT###
+g_subscriber_port_map = {}
+g_subscriber_port_map[1]='veth0'
+g_subscriber_port_map['veth0']=1
+g_subscriber_port_map[2]='veth2'
+g_subscriber_port_map['veth2']=2
+g_subscriber_port_map[3]='veth4'
+g_subscriber_port_map['veth4']=3
+g_subscriber_port_map[4]='veth6'
+g_subscriber_port_map['veth6']=4
+g_subscriber_port_map[5]='veth8'
+g_subscriber_port_map['veth8']=5
+g_subscriber_port_map[6]='veth10'
+g_subscriber_port_map['veth10']=6
+g_subscriber_port_map[7]='veth12'
+g_subscriber_port_map['veth12']=7
+g_subscriber_port_map[8]='veth14'
+g_subscriber_port_map['veth14']=8
+g_subscriber_port_map[9]='veth16'
+g_subscriber_port_map['veth16']=9
+g_subscriber_port_map[10]='veth18'
+g_subscriber_port_map['veth18']=10
+g_subscriber_port_map[11]='veth20'
+g_subscriber_port_map['veth20']=11
+g_subscriber_port_map[12]='veth22'
+g_subscriber_port_map['veth22']=12
+g_subscriber_port_map[13]='veth24'
+g_subscriber_port_map['veth24']=13
+g_subscriber_port_map[14]='veth26'
+g_subscriber_port_map['veth26']=14
+g_subscriber_port_map[15]='veth28'
+g_subscriber_port_map['veth28']=15
+g_subscriber_port_map[16]='veth30'
+g_subscriber_port_map['veth30']=16
+g_subscriber_port_map[17]='veth32'
+g_subscriber_port_map['veth32']=17
+g_subscriber_port_map[18]='veth34'
+g_subscriber_port_map['veth34']=18
+g_subscriber_port_map[19]='veth36'
+g_subscriber_port_map['veth36']=19
+g_subscriber_port_map[20]='veth38'
+g_subscriber_port_map['veth38']=20
+g_subscriber_port_map[21]='veth40'
+g_subscriber_port_map['veth40']=21
+g_subscriber_port_map[22]='veth42'
+g_subscriber_port_map['veth42']=22
+g_subscriber_port_map[23]='veth44'
+g_subscriber_port_map['veth44']=23
+g_subscriber_port_map[24]='veth46'
+g_subscriber_port_map['veth46']=24
+g_subscriber_port_map[25]='veth48'
+g_subscriber_port_map['veth48']=25
+g_subscriber_port_map[26]='veth50'
+g_subscriber_port_map['veth50']=26
+g_subscriber_port_map[27]='veth52'
+g_subscriber_port_map['veth52']=27
+g_subscriber_port_map[28]='veth54'
+g_subscriber_port_map['veth54']=28
+g_subscriber_port_map[29]='veth56'
+g_subscriber_port_map['veth56']=29
+g_subscriber_port_map[30]='veth58'
+g_subscriber_port_map['veth58']=30
+g_subscriber_port_map[31]='veth60'
+g_subscriber_port_map['veth60']=31
+g_subscriber_port_map[32]='veth62'
+g_subscriber_port_map['veth62']=32
+g_subscriber_port_map[33]='veth64'
+g_subscriber_port_map['veth64']=33
+g_subscriber_port_map[34]='veth66'
+g_subscriber_port_map['veth66']=34
+g_subscriber_port_map[35]='veth68'
+g_subscriber_port_map['veth68']=35
+g_subscriber_port_map[36]='veth70'
+g_subscriber_port_map['veth70']=36
+g_subscriber_port_map[37]='veth72'
+g_subscriber_port_map['veth72']=37
+g_subscriber_port_map[38]='veth74'
+g_subscriber_port_map['veth74']=38
+g_subscriber_port_map[39]='veth76'
+g_subscriber_port_map['veth76']=39
+g_subscriber_port_map[40]='veth78'
+g_subscriber_port_map['veth78']=40
+g_subscriber_port_map[41]='veth80'
+g_subscriber_port_map['veth80']=41
+g_subscriber_port_map[42]='veth82'
+g_subscriber_port_map['veth82']=42
+g_subscriber_port_map[43]='veth84'
+g_subscriber_port_map['veth84']=43
+g_subscriber_port_map[44]='veth86'
+g_subscriber_port_map['veth86']=44
+g_subscriber_port_map[45]='veth88'
+g_subscriber_port_map['veth88']=45
+g_subscriber_port_map[46]='veth90'
+g_subscriber_port_map['veth90']=46
+g_subscriber_port_map[47]='veth92'
+g_subscriber_port_map['veth92']=47
+g_subscriber_port_map[48]='veth94'
+g_subscriber_port_map['veth94']=48
+g_subscriber_port_map[49]='veth96'
+g_subscriber_port_map['veth96']=49
+g_subscriber_port_map[50]='veth98'
+g_subscriber_port_map['veth98']=50
+g_subscriber_port_map[51]='veth100'
+g_subscriber_port_map['veth100']=51
+g_subscriber_port_map[52]='veth102'
+g_subscriber_port_map['veth102']=52
+g_subscriber_port_map[53]='veth104'
+g_subscriber_port_map['veth104']=53
+g_subscriber_port_map[54]='veth106'
+g_subscriber_port_map['veth106']=54
+g_subscriber_port_map[55]='veth108'
+g_subscriber_port_map['veth108']=55
+g_subscriber_port_map[56]='veth110'
+g_subscriber_port_map['veth110']=56
+g_subscriber_port_map[57]='veth112'
+g_subscriber_port_map['veth112']=57
+g_subscriber_port_map[58]='veth114'
+g_subscriber_port_map['veth114']=58
+g_subscriber_port_map[59]='veth116'
+g_subscriber_port_map['veth116']=59
+g_subscriber_port_map[60]='veth118'
+g_subscriber_port_map['veth118']=60
+g_subscriber_port_map[61]='veth120'
+g_subscriber_port_map['veth120']=61
+g_subscriber_port_map[62]='veth122'
+g_subscriber_port_map['veth122']=62
+g_subscriber_port_map[63]='veth124'
+g_subscriber_port_map['veth124']=63
+g_subscriber_port_map[64]='veth126'
+g_subscriber_port_map['veth126']=64
+g_subscriber_port_map[65]='veth128'
+g_subscriber_port_map['veth128']=65
+g_subscriber_port_map[66]='veth130'
+g_subscriber_port_map['veth130']=66
+g_subscriber_port_map[67]='veth132'
+g_subscriber_port_map['veth132']=67
+g_subscriber_port_map[68]='veth134'
+g_subscriber_port_map['veth134']=68
+g_subscriber_port_map[69]='veth136'
+g_subscriber_port_map['veth136']=69
+g_subscriber_port_map[70]='veth138'
+g_subscriber_port_map['veth138']=70
+g_subscriber_port_map[71]='veth140'
+g_subscriber_port_map['veth140']=71
+g_subscriber_port_map[72]='veth142'
+g_subscriber_port_map['veth142']=72
+g_subscriber_port_map[73]='veth144'
+g_subscriber_port_map['veth144']=73
+g_subscriber_port_map[74]='veth146'
+g_subscriber_port_map['veth146']=74
+g_subscriber_port_map[75]='veth148'
+g_subscriber_port_map['veth148']=75
+g_subscriber_port_map[76]='veth150'
+g_subscriber_port_map['veth150']=76
+g_subscriber_port_map[77]='veth152'
+g_subscriber_port_map['veth152']=77
+g_subscriber_port_map[78]='veth154'
+g_subscriber_port_map['veth154']=78
+g_subscriber_port_map[79]='veth156'
+g_subscriber_port_map['veth156']=79
+g_subscriber_port_map[80]='veth158'
+g_subscriber_port_map['veth158']=80
+g_subscriber_port_map[81]='veth160'
+g_subscriber_port_map['veth160']=81
+g_subscriber_port_map[82]='veth162'
+g_subscriber_port_map['veth162']=82
+g_subscriber_port_map[83]='veth164'
+g_subscriber_port_map['veth164']=83
+g_subscriber_port_map[84]='veth166'
+g_subscriber_port_map['veth166']=84
+g_subscriber_port_map[85]='veth168'
+g_subscriber_port_map['veth168']=85
+g_subscriber_port_map[86]='veth170'
+g_subscriber_port_map['veth170']=86
+g_subscriber_port_map[87]='veth172'
+g_subscriber_port_map['veth172']=87
+g_subscriber_port_map[88]='veth174'
+g_subscriber_port_map['veth174']=88
+g_subscriber_port_map[89]='veth176'
+g_subscriber_port_map['veth176']=89
+g_subscriber_port_map[90]='veth178'
+g_subscriber_port_map['veth178']=90
+g_subscriber_port_map[91]='veth180'
+g_subscriber_port_map['veth180']=91
+g_subscriber_port_map[92]='veth182'
+g_subscriber_port_map['veth182']=92
+g_subscriber_port_map[93]='veth184'
+g_subscriber_port_map['veth184']=93
+g_subscriber_port_map[94]='veth186'
+g_subscriber_port_map['veth186']=94
+g_subscriber_port_map[95]='veth188'
+g_subscriber_port_map['veth188']=95
+g_subscriber_port_map[96]='veth190'
+g_subscriber_port_map['veth190']=96
+g_subscriber_port_map[97]='veth192'
+g_subscriber_port_map['veth192']=97
+g_subscriber_port_map[98]='veth194'
+g_subscriber_port_map['veth194']=98
+g_subscriber_port_map[99]='veth196'
+g_subscriber_port_map['veth196']=99
+g_subscriber_port_map[100]='veth198'
+g_subscriber_port_map['veth198']=100
+g_subscriber_port_map[101]='veth200'
+g_subscriber_port_map['veth200']=101
+g_subscriber_port_map[102]='veth202'
+g_subscriber_port_map['veth202']=102
+g_subscriber_port_map[103]='veth204'
+g_subscriber_port_map['veth204']=103
+g_subscriber_port_map[104]='veth206'
+g_subscriber_port_map['veth206']=104
+g_subscriber_port_map[105]='veth208'
+g_subscriber_port_map['veth208']=105
+g_subscriber_port_map[106]='veth210'
+g_subscriber_port_map['veth210']=106
+g_subscriber_port_map[107]='veth212'
+g_subscriber_port_map['veth212']=107
+g_subscriber_port_map[108]='veth214'
+g_subscriber_port_map['veth214']=108
+g_subscriber_port_map[109]='veth216'
+g_subscriber_port_map['veth216']=109
+g_subscriber_port_map[110]='veth218'
+g_subscriber_port_map['veth218']=110
+g_subscriber_port_map[111]='veth220'
+g_subscriber_port_map['veth220']=111
+g_subscriber_port_map[112]='veth222'
+g_subscriber_port_map['veth222']=112
+g_subscriber_port_map[113]='veth224'
+g_subscriber_port_map['veth224']=113
+g_subscriber_port_map[114]='veth226'
+g_subscriber_port_map['veth226']=114
+g_subscriber_port_map[115]='veth228'
+g_subscriber_port_map['veth228']=115
+g_subscriber_port_map[116]='veth230'
+g_subscriber_port_map['veth230']=116
+g_subscriber_port_map[117]='veth232'
+g_subscriber_port_map['veth232']=117
+g_subscriber_port_map[118]='veth234'
+g_subscriber_port_map['veth234']=118
+g_subscriber_port_map[119]='veth236'
+g_subscriber_port_map['veth236']=119
+g_subscriber_port_map[120]='veth238'
+g_subscriber_port_map['veth238']=120
+g_subscriber_port_map[121]='veth240'
+g_subscriber_port_map['veth240']=121
+g_subscriber_port_map[122]='veth242'
+g_subscriber_port_map['veth242']=122
+g_subscriber_port_map[123]='veth244'
+g_subscriber_port_map['veth244']=123
+g_subscriber_port_map[124]='veth246'
+g_subscriber_port_map['veth246']=124
+g_subscriber_port_map[125]='veth248'
+g_subscriber_port_map['veth248']=125
+g_subscriber_port_map[126]='veth250'
+g_subscriber_port_map['veth250']=126
+g_subscriber_port_map[127]='veth252'
+g_subscriber_port_map['veth252']=127
+g_subscriber_port_map[128]='veth254'
+g_subscriber_port_map['veth254']=128
+g_subscriber_port_map[129]='veth256'
+g_subscriber_port_map['veth256']=129
+g_subscriber_port_map[130]='veth258'
+g_subscriber_port_map['veth258']=130
+g_subscriber_port_map[131]='veth260'
+g_subscriber_port_map['veth260']=131
+g_subscriber_port_map[132]='veth262'
+g_subscriber_port_map['veth262']=132
+g_subscriber_port_map[133]='veth264'
+g_subscriber_port_map['veth264']=133
+g_subscriber_port_map[134]='veth266'
+g_subscriber_port_map['veth266']=134
+g_subscriber_port_map[135]='veth268'
+g_subscriber_port_map['veth268']=135
+g_subscriber_port_map[136]='veth270'
+g_subscriber_port_map['veth270']=136
+g_subscriber_port_map[137]='veth272'
+g_subscriber_port_map['veth272']=137
+g_subscriber_port_map[138]='veth274'
+g_subscriber_port_map['veth274']=138
+g_subscriber_port_map[139]='veth276'
+g_subscriber_port_map['veth276']=139
+g_subscriber_port_map[140]='veth278'
+g_subscriber_port_map['veth278']=140
+g_subscriber_port_map[141]='veth280'
+g_subscriber_port_map['veth280']=141
+g_subscriber_port_map[142]='veth282'
+g_subscriber_port_map['veth282']=142
+g_subscriber_port_map[143]='veth284'
+g_subscriber_port_map['veth284']=143
+g_subscriber_port_map[144]='veth286'
+g_subscriber_port_map['veth286']=144
+g_subscriber_port_map[145]='veth288'
+g_subscriber_port_map['veth288']=145
+g_subscriber_port_map[146]='veth290'
+g_subscriber_port_map['veth290']=146
+g_subscriber_port_map[147]='veth292'
+g_subscriber_port_map['veth292']=147
+g_subscriber_port_map[148]='veth294'
+g_subscriber_port_map['veth294']=148
+g_subscriber_port_map[149]='veth296'
+g_subscriber_port_map['veth296']=149
+g_subscriber_port_map[150]='veth298'
+g_subscriber_port_map['veth298']=150
+g_subscriber_port_map[151]='veth300'
+g_subscriber_port_map['veth300']=151
+g_subscriber_port_map[152]='veth302'
+g_subscriber_port_map['veth302']=152
+g_subscriber_port_map[153]='veth304'
+g_subscriber_port_map['veth304']=153
+g_subscriber_port_map[154]='veth306'
+g_subscriber_port_map['veth306']=154
+g_subscriber_port_map[155]='veth308'
+g_subscriber_port_map['veth308']=155
+g_subscriber_port_map[156]='veth310'
+g_subscriber_port_map['veth310']=156
+g_subscriber_port_map[157]='veth312'
+g_subscriber_port_map['veth312']=157
+g_subscriber_port_map[158]='veth314'
+g_subscriber_port_map['veth314']=158
+g_subscriber_port_map[159]='veth316'
+g_subscriber_port_map['veth316']=159
+g_subscriber_port_map[160]='veth318'
+g_subscriber_port_map['veth318']=160
+g_subscriber_port_map[161]='veth320'
+g_subscriber_port_map['veth320']=161
+g_subscriber_port_map[162]='veth322'
+g_subscriber_port_map['veth322']=162
+g_subscriber_port_map[163]='veth324'
+g_subscriber_port_map['veth324']=163
+g_subscriber_port_map[164]='veth326'
+g_subscriber_port_map['veth326']=164
+g_subscriber_port_map[165]='veth328'
+g_subscriber_port_map['veth328']=165
+g_subscriber_port_map[166]='veth330'
+g_subscriber_port_map['veth330']=166
+g_subscriber_port_map[167]='veth332'
+g_subscriber_port_map['veth332']=167
+g_subscriber_port_map[168]='veth334'
+g_subscriber_port_map['veth334']=168
+g_subscriber_port_map[169]='veth336'
+g_subscriber_port_map['veth336']=169
+g_subscriber_port_map[170]='veth338'
+g_subscriber_port_map['veth338']=170
+g_subscriber_port_map[171]='veth340'
+g_subscriber_port_map['veth340']=171
+g_subscriber_port_map[172]='veth342'
+g_subscriber_port_map['veth342']=172
+g_subscriber_port_map[173]='veth344'
+g_subscriber_port_map['veth344']=173
+g_subscriber_port_map[174]='veth346'
+g_subscriber_port_map['veth346']=174
+g_subscriber_port_map[175]='veth348'
+g_subscriber_port_map['veth348']=175
+g_subscriber_port_map[176]='veth350'
+g_subscriber_port_map['veth350']=176
+g_subscriber_port_map[177]='veth352'
+g_subscriber_port_map['veth352']=177
+g_subscriber_port_map[178]='veth354'
+g_subscriber_port_map['veth354']=178
+g_subscriber_port_map[179]='veth356'
+g_subscriber_port_map['veth356']=179
+g_subscriber_port_map[180]='veth358'
+g_subscriber_port_map['veth358']=180
+g_subscriber_port_map[181]='veth360'
+g_subscriber_port_map['veth360']=181
+g_subscriber_port_map[182]='veth362'
+g_subscriber_port_map['veth362']=182
+g_subscriber_port_map[183]='veth364'
+g_subscriber_port_map['veth364']=183
+g_subscriber_port_map[184]='veth366'
+g_subscriber_port_map['veth366']=184
+g_subscriber_port_map[185]='veth368'
+g_subscriber_port_map['veth368']=185
+g_subscriber_port_map[186]='veth370'
+g_subscriber_port_map['veth370']=186
+g_subscriber_port_map[187]='veth372'
+g_subscriber_port_map['veth372']=187
+g_subscriber_port_map[188]='veth374'
+g_subscriber_port_map['veth374']=188
+g_subscriber_port_map[189]='veth376'
+g_subscriber_port_map['veth376']=189
+g_subscriber_port_map[190]='veth378'
+g_subscriber_port_map['veth378']=190
+g_subscriber_port_map[191]='veth380'
+g_subscriber_port_map['veth380']=191
+g_subscriber_port_map[192]='veth382'
+g_subscriber_port_map['veth382']=192
+g_subscriber_port_map[193]='veth384'
+g_subscriber_port_map['veth384']=193
+g_subscriber_port_map[194]='veth386'
+g_subscriber_port_map['veth386']=194
+g_subscriber_port_map[195]='veth388'
+g_subscriber_port_map['veth388']=195
+g_subscriber_port_map[196]='veth390'
+g_subscriber_port_map['veth390']=196
+g_subscriber_port_map[197]='veth392'
+g_subscriber_port_map['veth392']=197
+g_subscriber_port_map[198]='veth394'
+g_subscriber_port_map['veth394']=198
+g_subscriber_port_map[199]='veth396'
+g_subscriber_port_map['veth396']=199
+g_subscriber_port_map[200]='veth398'
+g_subscriber_port_map['veth398']=200
diff --git a/src/test/subscriber/subscriberDb.py b/src/test/subscriber/subscriberDb.py
new file mode 100644
index 0000000..d6b3f75
--- /dev/null
+++ b/src/test/subscriber/subscriberDb.py
@@ -0,0 +1,61 @@
+import sqlite3
+import sys
+
+class SubscriberDB:
+    def __init__(self, db = 'subscriber.db', create = False):
+        self.db = db
+        self.con = sqlite3.connect(db)
+        self.con.row_factory = sqlite3.Row
+        self.cur = self.con.cursor()
+        self.services = [ 'DHCP', 'IGMP' ]
+        self.create = create
+        if create == True:
+            self.cur.execute("DROP TABLE IF EXISTS Subscriber")
+            self.cur.execute("CREATE TABLE Subscriber(Id INTEGER PRIMARY KEY, Name TEXT, Service TEXT);")
+
+    def load(self, name, service):
+        self.cur.execute("INSERT INTO Subscriber(Name, Service) VALUES (?, ?);", (name, service))
+
+    def commit(self):
+        self.con.commit()
+
+    def generate(self, num = 100):
+        #create db if not created
+        if self.create is False:
+            self.cur.execute("DROP TABLE IF EXISTS Subscriber")
+            self.cur.execute("CREATE TABLE Subscriber(Id INTEGER PRIMARY KEY, Name TEXT, Service TEXT);")
+            self.create = True
+        service = ' '.join(self.services)
+        for i in xrange(num):
+            name = "sub%d" %self.lastrowid()
+            self.load(name, service)
+        self.commit()
+
+    def read(self, num = 1000000, debug = False):
+        self.cur.execute("SELECT * FROM Subscriber LIMIT ?;", (num,))
+        rows = self.cur.fetchall()
+        if debug is True:
+            for row in rows:
+                print('Id %d, Name %s, Service %s' %(row['Id'], row['Name'], row['Service']))
+        return rows
+
+    def lastrowid(self):
+        return 0 if self.cur.lastrowid is None else self.cur.lastrowid
+
+if __name__ == "__main__":
+    create = False
+    if len(sys.argv) > 1:
+        try:
+            num_subscribers = int(sys.argv[1])
+        except ValueError:
+            num_subscribers = 100
+        print('Creating %d subscriber records' %num_subscribers)
+        create = True
+    sub = SubscriberDB(create = create)
+    if create == True:
+        sub.generate(num_subscribers)
+    else:
+        num_subscribers = 10
+    subscribers = sub.read(num_subscribers)
+    for s in subscribers:
+        print('Name %s, Service %s' %(s['Name'], s['Service']))
diff --git a/src/test/subscriber/subscriberTest.py b/src/test/subscriber/subscriberTest.py
new file mode 100644
index 0000000..ac01950
--- /dev/null
+++ b/src/test/subscriber/subscriberTest.py
@@ -0,0 +1,402 @@
+import unittest
+from nose.tools import *
+from nose.twistedtools import reactor, deferred
+from twisted.internet import defer
+from scapy.all import *
+import time, monotonic
+import os, sys
+import copy
+import tempfile
+import random
+import threading
+from Stats import Stats
+from OnosCtrl import OnosCtrl
+from DHCP import DHCPTest
+from EapTLS import TLSAuthTest
+from Channels import Channels, IgmpChannel
+from subscriberDb import SubscriberDB
+from threadPool import ThreadPool
+from portmaps import g_subscriber_port_map 
+from OltConfig import *
+log.setLevel('INFO')
+
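+# Subscriber models one emulated access subscriber: it extends Channels with a per-service map (DHCP/IGMP/TLS),
+# per-channel join statistics and a receive callback that measures join-to-receive latency.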
+class Subscriber(Channels):
+      PORT_TX_DEFAULT = 2
+      PORT_RX_DEFAULT = 1
+      INTF_TX_DEFAULT = 'veth2'
+      INTF_RX_DEFAULT = 'veth0'
+      STATS_RX = 0
+      STATS_TX = 1
+      STATS_JOIN = 2
+      STATS_LEAVE = 3
+      SUBSCRIBER_SERVICES = 'DHCP IGMP TLS'
+      def __init__(self, name = 'sub', service = SUBSCRIBER_SERVICES, port_map = None,
+                   num = 1, channel_start = 0,
+                   tx_port = PORT_TX_DEFAULT, rx_port = PORT_RX_DEFAULT,
+                   iface = INTF_RX_DEFAULT, iface_mcast = INTF_TX_DEFAULT,
+                   mcast_cb = None, loginType = 'wireless'):
+            self.tx_port = tx_port
+            self.rx_port = rx_port
+            self.port_map = port_map or g_subscriber_port_map
+            try:
+                  self.tx_intf = self.port_map[tx_port]
+                  self.rx_intf = self.port_map[rx_port]
+            except:
+                  self.tx_intf = self.port_map[self.PORT_TX_DEFAULT]
+                  self.rx_intf = self.port_map[self.PORT_RX_DEFAULT]
+
+            Channels.__init__(self, num, channel_start = channel_start, 
+                              iface = self.rx_intf, iface_mcast = self.tx_intf, mcast_cb = mcast_cb)
+            self.name = name
+            self.service = service
+            self.service_map = {}
+            services = self.service.strip().split(' ')
+            for s in services:
+                  self.service_map[s] = True
+            self.loginType = loginType
+            ##start streaming channels
+            self.join_map = {}
+            ##accumulated join recv stats
+            self.join_rx_stats = Stats()
+
+      def has_service(self, service):
+            if self.service_map.has_key(service):
+                  return self.service_map[service]
+            if self.service_map.has_key(service.upper()):
+                  return self.service_map[service.upper()]
+            return False
+
+      def channel_join_update(self, chan, join_time):
+            self.join_map[chan] = ( Stats(), Stats(), Stats(), Stats() )
+            self.channel_update(chan, self.STATS_JOIN, 1, t = join_time)
+
+      def channel_join(self, chan = 0, delay = 2):
+            '''Join a channel and create a send/recv stats map'''
+            if self.join_map.has_key(chan):
+                  del self.join_map[chan]
+            self.delay = delay
+            chan, join_time = self.join(chan)
+            self.channel_join_update(chan, join_time)
+            return chan
+
+      def channel_join_next(self, delay = 2):
+            '''Joins the next channel leaving the last channel'''
+            if self.last_chan is not None:
+                  if self.join_map.has_key(self.last_chan):
+                        del self.join_map[self.last_chan]
+            self.delay = delay
+            chan, join_time = self.join_next()
+            self.channel_join_update(chan, join_time)
+            return chan
+
+      def channel_jump(self, delay = 2):
+            '''Jumps randomly to the next channel leaving the last channel'''
+            if self.last_chan is not None:
+                  if self.join_map.has_key(self.last_chan):
+                        del self.join_map[self.last_chan]
+            self.delay = delay
+            chan, join_time = self.jump()
+            self.channel_join_update(chan, join_time)
+            return chan
+
+      def channel_leave(self, chan = 0):
+            if self.join_map.has_key(chan):
+                  del self.join_map[chan]
+            self.leave(chan)
+
+      def channel_update(self, chan, stats_type, packets, t=0):
+            if type(chan) == type(0):
+                  chan_list = (chan,)
+            else:
+                  chan_list = chan
+            for c in chan_list: 
+                  if self.join_map.has_key(c):
+                        self.join_map[c][stats_type].update(packets = packets, t = t)
+
+      def channel_receive(self, chan, cb = None, count = 1):
+            log.info('Subscriber %s receiving from group %s, channel %d' %(self.name, self.gaddr(chan), chan))
+            self.recv(chan, cb = cb, count = count)
+
+      def recv_channel_cb(self, pkt):
+            ##First verify that we have received the packet for the joined instance
+            log.debug('Packet received for group %s, subscriber %s' %(pkt[IP].dst, self.name))
+            chan = self.caddr(pkt[IP].dst)
+            assert_equal(chan in self.join_map.keys(), True)
+            recv_time = monotonic.monotonic() * 1000000
+            join_time = self.join_map[chan][self.STATS_JOIN].start
+            delta = recv_time - join_time
+            self.join_rx_stats.update(packets=1, t = delta, usecs = True)
+            self.channel_update(chan, self.STATS_RX, 1, t = delta)
+            log.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
+
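+# subscriber_pool bundles a subscriber with its verify callbacks so the thread pool can run them as a single task.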
+class subscriber_pool:
+
+      def __init__(self, subscriber, test_cbs):
+            self.subscriber = subscriber
+            self.test_cbs = test_cbs
+
+      def pool_cb(self):
+            for cb in self.test_cbs:
+                  if cb:
+                        cb(self.subscriber)
+      
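+# subscriber_exchange is the nose test case: setUp activates the ONOS apps (falling back to the OVS port map when
+# no OLT config is present) and the test_* methods exercise join/receive, channel jump and channel next scenarios.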
+class subscriber_exchange(unittest.TestCase):
+
+      apps = [ 'org.onosproject.aaa', 'org.onosproject.dhcp' ]
+      olt_apps = [ 'org.onosproject.igmp', 'org.onosproject.cordmcast' ]
+      dhcp_server_config = {
+        "ip": "10.1.11.50",
+        "mac": "ca:fe:ca:fe:ca:fe",
+        "subnet": "255.255.252.0",
+        "broadcast": "10.1.11.255",
+        "router": "10.1.8.1",
+        "domain": "8.8.8.8",
+        "ttl": "63",
+        "delay": "2",
+        "startip": "10.1.11.51",
+        "endip": "10.1.11.100"
+      }
+
+      aaa_loaded = False
+
+      def setUp(self):
+          '''Load the OLT config and activate relevant apps'''
+          self.olt = OltConfig()
+          self.port_map = self.olt.olt_port_map()
+          ##if no olt config, fall back to ovs port map
+          if not self.port_map:
+                self.port_map = g_subscriber_port_map
+          else:
+                log.info('Using OLT Port configuration for test setup')
+                log.info('Configuring CORD OLT access device information')
+                OnosCtrl.cord_olt_config(self.olt.olt_device_data())
+                self.activate_apps(self.olt_apps)
+
+          self.activate_apps(self.apps)
+
+      def tearDown(self):
+          '''Deactivate the AAA and DHCP apps'''
+          for app in self.apps:
+              onos_ctrl = OnosCtrl(app)
+              onos_ctrl.deactivate()
+
+      def activate_apps(self, apps):
+            for app in apps:
+                  onos_ctrl = OnosCtrl(app)
+                  status, _ = onos_ctrl.activate()
+                  assert_equal(status, True)
+                  time.sleep(2)
+
+      def onos_aaa_load(self):
+            if self.aaa_loaded:
+                  return
+            aaa_dict = {'apps' : { 'org.onosproject.aaa' : { 'AAA' : { 'radiusSecret': 'radius_password', 
+                                                                       'radiusIp': '172.17.0.2' } } } }
+            radius_ip = os.getenv('ONOS_AAA_IP') or '172.17.0.2'
+            aaa_dict['apps']['org.onosproject.aaa']['AAA']['radiusIp'] = radius_ip
+            self.onos_load_config('org.onosproject.aaa', aaa_dict)
+            self.aaa_loaded = True
+
+      def onos_dhcp_table_load(self, config = None):
+          dhcp_dict = {'apps' : { 'org.onosproject.dhcp' : { 'dhcp' : copy.copy(self.dhcp_server_config) } } }
+          dhcp_config = dhcp_dict['apps']['org.onosproject.dhcp']['dhcp']
+          if config:
+              for k in config.keys():
+                  if dhcp_config.has_key(k):
+                      dhcp_config[k] = config[k]
+          self.onos_load_config('org.onosproject.dhcp', dhcp_dict)
+
+      def onos_load_config(self, app, config):
+          status, code = OnosCtrl.config(config)
+          if status is False:
+             log.info('JSON config request for app %s returned status %d' %(app, code))
+             assert_equal(status, True)
+          time.sleep(2)
+
+      def dhcp_sndrcv(self, dhcp, update_seed = False):
+            cip, sip = dhcp.discover(update_seed = update_seed)
+            assert_not_equal(cip, None)
+            assert_not_equal(sip, None)
+            log.info('Got dhcp client IP %s from server %s for mac %s' %
+                     (cip, sip, dhcp.get_mac(cip)[0]))
+            return cip,sip
+
+      def dhcp_request(self, subscriber, seed_ip = '10.10.10.1', update_seed = False):
+            config = {'startip':'10.10.10.20', 'endip':'10.10.10.200',
+                      'ip':'10.10.10.2', 'mac': "ca:fe:ca:fe:ca:fe",
+                      'subnet': '255.255.255.0', 'broadcast':'10.10.10.255', 'router':'10.10.10.1'}
+            self.onos_dhcp_table_load(config)
+            dhcp = DHCPTest(seed_ip = seed_ip, iface = subscriber.iface)
+            cip, sip = self.dhcp_sndrcv(dhcp, update_seed = update_seed)
+            return cip, sip
+
+      def recv_channel_cb(self, pkt):
+            ##First verify that we have received the packet for the joined instance
+            chan = self.subscriber.caddr(pkt[IP].dst)
+            assert_equal(chan in self.subscriber.join_map.keys(), True)
+            recv_time = monotonic.monotonic() * 1000000
+            join_time = self.subscriber.join_map[chan][self.subscriber.STATS_JOIN].start
+            delta = recv_time - join_time
+            self.subscriber.join_rx_stats.update(packets=1, t = delta, usecs = True)
+            self.subscriber.channel_update(chan, self.subscriber.STATS_RX, 1, t = delta)
+            log.debug('Packet received in %.3f usecs for group %s after join' %(delta, pkt[IP].dst))
+            self.test_status = True
+
+      def tls_verify(self, subscriber):
+            if subscriber.has_service('TLS'):
+                  time.sleep(2)
+                  tls = TLSAuthTest()
+                  log.info('Running subscriber %s tls auth test' %subscriber.name)
+                  tls.runTest()
+                  self.test_status = True
+
+      def dhcp_verify(self, subscriber):
+            cip, sip = self.dhcp_request(subscriber, update_seed = True)
+            log.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
+            subscriber.src_list = [cip]
+            self.test_status = True
+
+      def dhcp_jump_verify(self, subscriber):
+          cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.200.1')
+          log.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
+          subscriber.src_list = [cip]
+          self.test_status = True
+
+      def dhcp_next_verify(self, subscriber):
+          cip, sip = self.dhcp_request(subscriber, seed_ip = '10.10.150.1')
+          log.info('Subscriber %s got client ip %s from server %s' %(subscriber.name, cip, sip))
+          subscriber.src_list = [cip]
+          self.test_status = True
+
+      def igmp_verify(self, subscriber):
+            chan = 0
+            if subscriber.has_service('IGMP'):
+                  for i in range(5):
+                        log.info('Joining channel %d for subscriber %s' %(chan, subscriber.name))
+                        subscriber.channel_join(chan, delay = 0)
+                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
+                        log.info('Leaving channel %d for subscriber %s' %(chan, subscriber.name))
+                        subscriber.channel_leave(chan)
+                        time.sleep(3)
+                        log.info('Interface %s Join RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name,subscriber.join_rx_stats))
+                  self.test_status = True
+
+      def igmp_jump_verify(self, subscriber):
+            if subscriber.has_service('IGMP'):
+                  for i in xrange(subscriber.num):
+                        log.info('Subscriber %s jumping channel' %subscriber.name)
+                        chan = subscriber.channel_jump(delay=0)
+                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count = 1)
+                        log.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
+                        time.sleep(3)
+                  log.info('Interface %s Jump RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
+                  self.test_status = True
+
+      def igmp_next_verify(self, subscriber):
+            if subscriber.has_service('IGMP'):
+                  for i in xrange(subscriber.num):
+                        if i:
+                              chan = subscriber.channel_join_next(delay=0)
+                        else:
+                              chan = subscriber.channel_join(i, delay=0)
+                        log.info('Joined next channel %d for subscriber %s' %(chan, subscriber.name))
+                        subscriber.channel_receive(chan, cb = subscriber.recv_channel_cb, count=1)
+                        log.info('Verified receive for channel %d, subscriber %s' %(chan, subscriber.name))
+                        time.sleep(3)
+                  log.info('Interface %s Join Next RX stats for subscriber %s, %s' %(subscriber.iface, subscriber.name, subscriber.join_rx_stats))
+                  self.test_status = True
+
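+      # Assign a dedicated veth pair per subscriber (odd rx port, even tx port) when multiple channels are in use,
+      # otherwise fall back to the default tx/rx ports.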
+      def generate_port_list(self, subscribers, channels):
+            port_list = []
+            for i in xrange(subscribers):
+                  if channels > 1:
+                        rx_port = 2*i+1
+                        tx_port = 2*i+2
+                  else:
+                        rx_port = Subscriber.PORT_RX_DEFAULT
+                        tx_port = Subscriber.PORT_TX_DEFAULT
+                  port_list.append((tx_port, rx_port))
+            return port_list
+
+      def subscriber_load(self, create = True, num = 10, num_channels = 1, channel_start = 0, port_list = []):
+            '''Load the subscriber from the database'''
+            self.subscriber_db = SubscriberDB(create = create)
+            if create is True:
+                  self.subscriber_db.generate(num)
+            self.subscriber_info = self.subscriber_db.read(num)
+            self.subscriber_list = []
+            if not port_list:
+                  port_list = self.generate_port_list(num, num_channels)
+
+            index = 0
+            for info in self.subscriber_info:
+                  self.subscriber_list.append(Subscriber(name=info['Name'], 
+                                                         service=info['Service'],
+                                                         port_map = self.port_map,
+                                                         num=num_channels,
+                                                         channel_start = channel_start,
+                                                         tx_port = port_list[index][0],
+                                                         rx_port = port_list[index][1]))
+                  if num_channels > 1:
+                        channel_start += num_channels
+                  index += 1
+
+            #load the ssm list for all subscriber channels
+            igmpChannel = IgmpChannel()
+            ssm_groups = map(lambda sub: sub.channels, self.subscriber_list)
+            ssm_list = reduce(lambda ssm1, ssm2: ssm1+ssm2, ssm_groups)
+            igmpChannel.igmp_load_ssm_config(ssm_list)
+            #load the subscriber to mcast port map for cord
+            cord_port_map = {}
+            for sub in self.subscriber_list:
+                  for chan in sub.channels:
+                        cord_port_map[chan] = (sub.tx_port, sub.rx_port)
+
+            igmpChannel.cord_port_table_load(cord_port_map)
+
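+      # Load/generate subscribers, push the AAA config, then run each subscriber's verify callbacks
+      # (TLS, DHCP and IGMP by default) through a bounded thread pool and return the aggregate test status.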
+      def subscriber_join_verify( self, num_subscribers = 10, num_channels = 1, 
+                                  channel_start = 0, cbs = None, port_list = []):
+          self.test_status = False
+          self.num_subscribers = num_subscribers
+          self.subscriber_load(create = True, num = num_subscribers,
+                               num_channels = num_channels, channel_start = channel_start, port_list = port_list)
+          self.onos_aaa_load()
+          self.thread_pool = ThreadPool(min(100, self.num_subscribers), queue_size=1, wait_timeout=1)
+          if cbs is None:
+                cbs = (self.tls_verify, self.dhcp_verify, self.igmp_verify)
+          for subscriber in self.subscriber_list:
+                subscriber.start()
+                pool_object = subscriber_pool(subscriber, cbs)
+                self.thread_pool.addTask(pool_object.pool_cb)
+          self.thread_pool.cleanUpThreads()
+          for subscriber in self.subscriber_list:
+                subscriber.stop()
+          return self.test_status
+
+      def test_subscriber_join_recv(self):
+          """Test subscriber join and receive"""
+          num_subscribers = 50
+          num_channels = 1
+          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers, 
+                                                    num_channels = num_channels,
+                                                    port_list = self.generate_port_list(num_subscribers, num_channels))
+          assert_equal(test_status, True)
+
+      def test_subscriber_join_jump(self):
+          """Test subscriber join and receive for channel surfing""" 
+          num_subscribers = 5
+          num_channels = 50
+          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers, 
+                                                    num_channels = num_channels,
+                                                    cbs = (self.tls_verify, self.dhcp_jump_verify, self.igmp_jump_verify),
+                                                    port_list = self.generate_port_list(num_subscribers, num_channels))
+          assert_equal(test_status, True)
+
+      def test_subscriber_join_next(self):
+          """Test subscriber join next for channels"""
+          num_subscribers = 5
+          num_channels = 50
+          test_status = self.subscriber_join_verify(num_subscribers = num_subscribers, 
+                                                    num_channels = num_channels,
+                                                    cbs = (self.tls_verify, self.dhcp_next_verify, self.igmp_next_verify),
+                                                    port_list = self.generate_port_list(num_subscribers, num_channels))
+          assert_equal(test_status, True)
diff --git a/src/test/tls/__init__.py b/src/test/tls/__init__.py
new file mode 100644
index 0000000..a881eb6
--- /dev/null
+++ b/src/test/tls/__init__.py
@@ -0,0 +1,7 @@
+import os,sys
+##add the python path to lookup the utils
+working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
+utils_dir = os.path.join(working_dir, '../utils')
+fsm_dir = os.path.join(working_dir, '../fsm')
+__path__.append(utils_dir)
+__path__.append(fsm_dir)
diff --git a/src/test/tls/tlsAuthTest.py b/src/test/tls/tlsAuthTest.py
index 22d1c76..309b1e4 100644
--- a/src/test/tls/tlsAuthTest.py
+++ b/src/test/tls/tlsAuthTest.py
@@ -1,11 +1,34 @@
 import unittest
-import os,sys
-CORD_TEST_UTILS = 'utils'
-test_root = os.getenv('CORD_TEST_ROOT') or './'
-sys.path.append(test_root + CORD_TEST_UTILS)
+import time
+import os
+from nose.tools import *
 from EapTLS import TLSAuthTest
+from OnosCtrl import OnosCtrl
 
 class eap_auth_exchange(unittest.TestCase):
+
+      app = 'org.onosproject.aaa'
+
+      def setUp(self):
+            self.onos_ctrl = OnosCtrl(self.app)
+            self.onos_aaa_config()
+
+      def onos_aaa_config(self):
+            aaa_dict = {'apps' : { 'org.onosproject.aaa' : { 'AAA' : { 'radiusSecret': 'radius_password', 
+                                                                   'radiusIp': '172.17.0.2' } } } }
+            radius_ip = os.getenv('ONOS_AAA_IP') or '172.17.0.2'
+            aaa_dict['apps']['org.onosproject.aaa']['AAA']['radiusIp'] = radius_ip
+            self.onos_ctrl.activate()
+            time.sleep(2)
+            self.onos_load_config(aaa_dict)
+
+      def onos_load_config(self, config):
+            status, code = OnosCtrl.config(config)
+            if status is False:
+                  print('Configure request for AAA returned status %d' %code)
+                  assert_equal(status, True)
+            time.sleep(3)
+            
       def test_eap_tls(self):
           tls = TLSAuthTest()
           tls.runTest()
diff --git a/src/test/utils/Channels.py b/src/test/utils/Channels.py
new file mode 100644
index 0000000..e0b11c8
--- /dev/null
+++ b/src/test/utils/Channels.py
@@ -0,0 +1,266 @@
+import threading
+import sys
+import os
+import time
+import monotonic
+import random
+from scapy.all import *
+from McastTraffic import *
+from IGMP import *
+from OnosCtrl import OnosCtrl
+from nose.tools import *
+log.setLevel('DEBUG')
+
+conf.verb = 0
+
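+# IgmpChannel crafts IGMPv3 membership reports for join/leave and pushes the ssm-translate and
+# CORD IGMP port-map configuration to ONOS.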
+class IgmpChannel:
+
+    IGMP_DST_MAC = "01:00:5e:00:01:01"
+    IGMP_SRC_MAC = "5a:e1:ac:ec:4d:a1"
+    IP_SRC = '1.2.3.4'
+    IP_DST = '224.0.1.1'
+    igmp_eth = Ether(dst = IGMP_DST_MAC, src = IGMP_SRC_MAC, type = ETH_P_IP)
+    igmp_ip = IP(dst = IP_DST, src = IP_SRC)
+    ssm_list = [] 
+
+    def __init__(self, iface = 'veth0', ssm_list = [], src_list = ['1.2.3.4'], delay = 2):
+        self.iface = iface
+        self.ssm_list += ssm_list
+        self.src_list = src_list
+        self.delay = delay
+        self.onos_ctrl = OnosCtrl('org.onosproject.igmp')
+        self.onos_ctrl.activate()
+    
+    def igmp_load_ssm_config(self, ssm_list = []):
+        if not ssm_list:
+            ssm_list = self.ssm_list
+        self.ssm_table_load(ssm_list)
+
+    def igmp_join(self, groups):
+        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
+                      gaddr='224.0.1.1')
+        for g in groups:
+              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_EXCLUDE, mcaddr=g)
+              gr.sources = self.src_list
+              igmp.grps.append(gr)
+
+        pkt = self.igmp_eth/self.igmp_ip/igmp
+        IGMPv3.fixup(pkt)
+        sendp(pkt, iface=self.iface)
+        if self.delay != 0:
+            time.sleep(self.delay)
+
+    def igmp_leave(self, groups):
+        igmp = IGMPv3(type = IGMP_TYPE_V3_MEMBERSHIP_REPORT, max_resp_code=30,
+                      gaddr='224.0.1.1')
+        for g in groups:
+              gr = IGMPv3gr(rtype=IGMP_V3_GR_TYPE_INCLUDE, mcaddr=g)
+              gr.sources = self.src_list
+              igmp.grps.append(gr)
+
+        pkt = self.igmp_eth/self.igmp_ip/igmp
+        IGMPv3.fixup(pkt)
+        sendp(pkt, iface = self.iface)
+        if self.delay != 0:
+            time.sleep(self.delay)
+
+    def onos_load_config(self, config):
+        status, code = OnosCtrl.config(config)
+        if status is False:
+            log.info('JSON config request returned status %d' %code)
+        time.sleep(2)
+
+    def ssm_table_load(self, groups):
+          ssm_dict = {'apps' : { 'org.onosproject.igmp' : { 'ssmTranslate' : [] } } }
+          ssm_xlate_list = ssm_dict['apps']['org.onosproject.igmp']['ssmTranslate']
+          for g in groups:
+                for s in self.src_list:
+                      d = {}
+                      d['source'] = s
+                      d['group'] = g
+                      ssm_xlate_list.append(d)
+          self.onos_load_config(ssm_dict)
+
+    def cord_port_table_load(self, cord_port_map):
+          cord_group_dict = {'apps' : { 'org.ciena.cordigmp' : { 'cordIgmpTranslate' : [] } } }
+          cord_group_xlate_list = cord_group_dict['apps']['org.ciena.cordigmp']['cordIgmpTranslate']
+          for group, ports in cord_port_map.items():
+              d = {}
+              d['group'] = group
+              d['inputPort'] = ports[0]
+              d['outputPort'] = ports[1]
+              cord_group_xlate_list.append(d)
+          self.onos_load_config(cord_group_dict)
+
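+# Channels maps channel numbers onto a generated block of 224.x.x.x group addresses and tracks join/leave state,
+# using a raw L2 socket on the receive interface for verification.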
+class Channels(IgmpChannel):
+    Stopped = 0
+    Started = 1
+    Idle = 0
+    Joined = 1
+    def __init__(self, num, channel_start = 0, iface = 'veth0', iface_mcast = 'veth2', mcast_cb = None):
+        self.num = num
+        self.channel_start = channel_start
+        self.channels = self.generate(self.num, self.channel_start)
+        self.group_channel_map = {}
+        #assert_equal(len(self.channels), self.num)
+        for i in range(self.num):
+            self.group_channel_map[self.channels[i]] = i
+        self.state = self.Stopped
+        self.streams = None
+        self.channel_states = {}
+        self.last_chan = None
+        self.recv_sock = L2Socket(iface = iface, type = ETH_P_IP)
+        self.iface_mcast = iface_mcast
+        self.mcast_cb = mcast_cb
+        for c in range(self.num):
+            self.channel_states[c] = [self.Idle]
+        IgmpChannel.__init__(self, ssm_list = self.channels, iface=iface)
+        
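+    # Map a range of channel indices to consecutive 224.x.x.x group addresses, skipping addresses whose last octet is 0.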
+    def generate(self, num, channel_start = 0):
+        start = (224 << 24) | ( ( (channel_start >> 16) & 0xff) << 16 ) | \
+            ( ( (channel_start >> 8) & 0xff ) << 8 ) | (channel_start) & 0xff
+        start += channel_start/256 + 1
+        end = start + num
+        group_addrs = []
+        count = 0
+        while count != num:
+            for i in range(start, end):
+                if i&255:
+                    g = '%s.%s.%s.%s' %((i>>24) &0xff, (i>>16)&0xff, (i>>8)&0xff, i&0xff)
+                    log.debug('Adding group %s' %g)
+                    group_addrs.append(g)
+                    count += 1
+            start = end
+            end = start + 1
+        return group_addrs
+
+    def start(self):
+        if self.state == self.Stopped:
+            if self.streams:
+                self.streams.stop()
+            self.streams = McastTraffic(self.channels, iface=self.iface_mcast, cb = self.mcast_cb)
+            self.streams.start()
+            self.state = self.Started
+
+    def join(self, chan = None):
+        if chan is None:
+            chan = random.randint(0, self.num - 1)
+        else:
+            if chan >= self.num:
+                chan = 0
+
+        if self.get_state(chan) == self.Joined:
+            return chan, 0
+
+        groups = [self.channels[chan]]
+        join_start = monotonic.monotonic()
+        self.igmp_join(groups)
+        self.set_state(chan, self.Joined)
+        self.last_chan = chan
+        return chan, join_start
+
+    def leave(self, chan):
+        if chan is None:
+            chan = self.last_chan
+        if chan is None or chan >= self.num:
+            return False
+        if self.get_state(chan) != self.Joined:
+            return False
+        groups = [self.channels[chan]]
+        self.igmp_leave(groups)
+        self.set_state(chan, self.Idle)
+        if chan == self.last_chan:
+            self.last_chan = None
+        return True
+    
+    def join_next(self, chan = None):
+        if chan is None:
+            chan = self.last_chan
+            if chan is None:
+                return None
+            leave = chan
+            join = chan+1
+        else:
+            leave = chan - 1
+            join = chan
+        
+        if join >= self.num:
+            join = 0
+
+        if leave >= 0 and leave != join:
+            self.leave(leave)
+
+        return self.join(join)
+
+    def jump(self):
+        chan = self.last_chan
+        if chan is not None:
+            self.leave(chan)
+            s_next = chan
+        else:
+            s_next = 0
+        if self.num - s_next < 2:
+            s_next = 0
+        chan = random.randint(s_next, self.num - 1)
+        return self.join(chan)
+
+    def gaddr(self, chan):
+        '''Return the group address for a channel'''
+        if chan >= self.num:
+            return None
+        return self.channels[chan]
+
+    def caddr(self, group):
+        '''Return a channel given a group addr'''
+        if self.group_channel_map.has_key(group):
+            return self.group_channel_map[group]
+        return None
+
+    def recv_cb(self, pkt):
+        '''Default channel receive callback'''
+        log.debug('Received packet from source %s, destination %s' %(pkt[IP].src, pkt[IP].dst))
+        send_time = float(pkt[IP].payload.load)
+        recv_time = monotonic.monotonic()
+        log.debug('Packet received in %.3f usecs' %(recv_time - send_time))
+
+    def recv(self, chan, cb = None, count = 1):
+        if chan is None:
+            return None
+        if type(chan) == type([]) or type(chan) == type(()):
+            channel_list=filter(lambda c: c < self.num, chan)
+            groups = map(lambda c: self.gaddr(c), channel_list)
+        else:
+            groups = (self.gaddr(chan),)
+        if cb is None:
+            cb = self.recv_cb
+        sniff(prn = cb, count=count, lfilter = lambda p: p[IP].dst in groups, opened_socket = self.recv_sock)
+
+    def stop(self):
+        if self.streams:
+            self.streams.stop()
+        self.state = self.Stopped
+
+    def get_state(self, chan):
+        return self.channel_states[chan][0]
+
+    def set_state(self, chan, state):
+        self.channel_states[chan][0] = state
+
+if __name__ == '__main__':
+    num = 5
+    start = 0
+    ssm_list = []
+    for i in xrange(2):
+        channels = Channels(num, start)
+        ssm_list += channels.channels
+        start += num
+    igmpChannel = IgmpChannel()
+    igmpChannel.igmp_load_ssm_config(ssm_list)
+    channels.start()
+    for i in range(num):
+        channels.join(i)
+    for i in range(num):
+        channels.recv(i)
+    for i in range(num):
+        channels.leave(i)
+    channels.stop()
diff --git a/src/test/utils/CordContainer.py b/src/test/utils/CordContainer.py
new file mode 100644
index 0000000..ad3194d
--- /dev/null
+++ b/src/test/utils/CordContainer.py
@@ -0,0 +1,319 @@
+import os,time
+import io
+import json
+from pyroute2 import IPRoute
+from itertools import chain
+from nsenter import Namespace
+from docker import Client
+from shutil import copy
+
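+# Context manager that exposes a container's network namespace under /var/run/netns
+# so pyroute2/nsenter can operate inside it.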
+class docker_netns(object):
+
+    dckr = Client()
+    def __init__(self, name):
+        pid = int(self.dckr.inspect_container(name)['State']['Pid'])
+        if pid == 0:
+            raise Exception('no container named {0}'.format(name))
+        self.pid = pid
+
+    def __enter__(self):
+        pid = self.pid
+        if not os.path.exists('/var/run/netns'):
+            os.mkdir('/var/run/netns')
+        os.symlink('/proc/{0}/ns/net'.format(pid), '/var/run/netns/{0}'.format(pid))
+        return str(pid)
+
+    def __exit__(self, type, value, traceback):
+        pid = self.pid
+        os.unlink('/var/run/netns/{0}'.format(pid))
+
+flatten = lambda l: chain.from_iterable(l)
+
+class Container(object):
+    dckr = Client()
+    def __init__(self, name, image, tag = 'latest', command = 'bash', quagga_config = None):
+        self.name = name
+        self.image = image
+        self.tag = tag
+        self.image_name = image + ':' + tag
+        self.id = None
+        self.command = command
+        self.quagga_config = quagga_config
+
+    @classmethod
+    def build_image(cls, dockerfile, tag, force=True, nocache=False):
+        f = io.BytesIO(dockerfile.encode('utf-8'))
+        if force or not cls.image_exists(tag):
+            print('Build {0}...'.format(tag))
+            for line in cls.dckr.build(fileobj=f, rm=True, tag=tag, decode=True, nocache=nocache):
+                if 'stream' in line:
+                    print(line['stream'].strip())
+
+    @classmethod
+    def image_exists(cls, name):
+        return name in [ctn['RepoTags'][0] for ctn in cls.dckr.images()]
+
+    @classmethod
+    def create_host_config(cls, port_list = None, host_guest_map = None, privileged = False):
+        port_bindings = None
+        binds = None
+        if port_list:
+            port_bindings = {}
+            for p in port_list:
+                port_bindings[str(p)] = str(p)
+
+        if host_guest_map:
+            binds = []
+            for h, g in host_guest_map:
+                binds.append('{0}:{1}'.format(h, g))
+
+        return cls.dckr.create_host_config(binds = binds, port_bindings = port_bindings, privileged = privileged)
+
+    @classmethod
+    def cleanup(cls, image):
+        cnt_list = filter(lambda c: c['Image'] == image, cls.dckr.containers())
+        for cnt in cnt_list:
+            print('Cleaning container %s' %cnt['Id'])
+            cls.dckr.kill(cnt['Id'])
+            cls.dckr.remove_container(cnt['Id'], force=True)
+
+    @classmethod
+    def remove_container(cls, name, force=True):
+        try:
+            cls.dckr.remove_container(name, force = force)
+        except: pass
+
+    def exists(self):
+        return '/{0}'.format(self.name) in list(flatten(n['Names'] for n in self.dckr.containers()))
+
+    def img_exists(self):
+        return self.image_name in [ctn['RepoTags'][0] for ctn in self.dckr.images()]
+
+    def ip(self):
+        cnt_list = filter(lambda c: c['Image'] == self.image_name, self.dckr.containers())
+        cnt_settings = cnt_list.pop()
+        return cnt_settings['NetworkSettings']['Networks']['bridge']['IPAddress']
+
+    def kill(self, remove = True):
+        self.dckr.kill(self.name)
+        self.dckr.remove_container(self.name, force=True)
+
+    def start(self, rm = True, ports = None, volumes = None, host_config = None, 
+              environment = None, tty = False, stdin_open = True):
+
+        if rm and self.exists():
+            print('Removing container:', self.name)
+            self.dckr.remove_container(self.name, force=True)
+
+        ctn = self.dckr.create_container(image=self.image_name, ports = ports, command=self.command, 
+                                         detach=True, name=self.name,
+                                         environment = environment, 
+                                         volumes = volumes, 
+                                         host_config = host_config, stdin_open=stdin_open, tty = tty)
+        self.dckr.start(container=self.name)
+        if self.quagga_config:
+            self.connect_to_br()
+        self.id = ctn['Id']
+        return ctn
+
+    def connect_to_br(self):
+        index = 0
+        with docker_netns(self.name) as pid:
+            for quagga_config in self.quagga_config:
+                ip = IPRoute()
+                br = ip.link_lookup(ifname=quagga_config['bridge'])
+                if len(br) == 0:
+                    ip.link_create(ifname=quagga_config['bridge'], kind='bridge')
+                    br = ip.link_lookup(ifname=quagga_config['bridge'])
+                br = br[0]
+                ip.link('set', index=br, state='up')
+                ifname = '{0}-{1}'.format(self.name, index)
+                ifs = ip.link_lookup(ifname=ifname)
+                if len(ifs) > 0:
+                   ip.link_remove(ifs[0])
+                peer_ifname = '{0}-{1}'.format(pid, index)
+                ip.link_create(ifname=ifname, kind='veth', peer=peer_ifname)
+                host = ip.link_lookup(ifname=ifname)[0]
+                ip.link('set', index=host, master=br)
+                ip.link('set', index=host, state='up')
+                guest = ip.link_lookup(ifname=peer_ifname)[0]
+                ip.link('set', index=guest, net_ns_fd=pid)
+                with Namespace(pid, 'net'):
+                    ip = IPRoute()
+                    ip.link('set', index=guest, ifname='eth{}'.format(index+1))
+                    ip.addr('add', index=guest, address=quagga_config['ip'], mask=quagga_config['mask'])
+                    ip.link('set', index=guest, state='up')
+                index += 1
+
+    def execute(self, cmd, tty = True, stream = False, shell = False):
+        res = 0
+        if type(cmd) == str:
+            cmds = (cmd,)
+        else:
+            cmds = cmd
+        if shell:
+            for c in cmds:
+                res += os.system('docker exec {0} {1}'.format(self.name, c))
+            return res
+        for c in cmds:
+            i = self.dckr.exec_create(container=self.name, cmd=c, tty = tty, privileged = True)
+            self.dckr.exec_start(i['Id'], stream = stream, detach=True)
+            result = self.dckr.exec_inspect(i['Id'])
+            res += 0 if result['ExitCode'] == None else result['ExitCode']
+        return res
+
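+# Size the ONOS JVM heap from the host: half of MemTotal+SwapTotal in GB, clamped between 1G and 16G.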
+def get_mem():
+    with open('/proc/meminfo', 'r') as fd:
+        meminfo = fd.readlines()
+        mem = 0
+        for m in meminfo:
+            if m.startswith('MemTotal:') or m.startswith('SwapTotal:'):
+                mem += int(m.split(':')[1].strip().split()[0])
+
+        mem = max(mem/1024/1024/2, 1)
+        mem = min(mem, 16)
+        return str(mem) + 'G'
+
+class Onos(Container):
+
+    quagga_config = ( { 'bridge' : 'quagga-br', 'ip': '10.10.0.4', 'mask' : 16 }, )
+    SYSTEM_MEMORY = (get_mem(),) * 2
+    JAVA_OPTS = '-Xms{} -Xmx{} -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode'.format(*SYSTEM_MEMORY)#-XX:+PrintGCDetails -XX:+PrintGCTimeStamps'
+    env = { 'ONOS_APPS' : 'drivers,openflow,proxyarp,aaa,igmp,vrouter', 'JAVA_OPTS' : JAVA_OPTS }
+    ports = [ 8181, 8101, 9876, 6653, 6633, 2000, 2620 ]
+    host_config_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'setup/onos-config')
+    guest_config_dir = '/root/onos/config'
+    host_guest_map = ( (host_config_dir, guest_config_dir), )
+
+    def __init__(self, name = 'cord-onos', image = 'onosproject/onos', tag = 'latest', 
+                 boot_delay = 60, restart = False, network_cfg = None):
+        if restart is True:
+            ##Find the right image to restart
+            running_image = filter(lambda c: c['Names'][0] == '/{}'.format(name), self.dckr.containers())
+            if running_image:
+                image_name = running_image[0]['Image']
+                try:
+                    image = image_name.split(':')[0]
+                    tag = image_name.split(':')[1]
+                except: pass
+
+        super(Onos, self).__init__(name, image, tag = tag, quagga_config = self.quagga_config)
+        if restart is True and self.exists():
+            self.kill()
+        if not self.exists():
+            self.remove_container(name, force=True)
+            host_config = self.create_host_config(port_list = self.ports,
+                                                  host_guest_map = self.host_guest_map)
+            volumes = []
+            for _,g in self.host_guest_map:
+                volumes.append(g)
+            if network_cfg is not None:
+                json_data = json.dumps(network_cfg)
+                with open('{}/network-cfg.json'.format(self.host_config_dir), 'w') as f:
+                    f.write(json_data)
+            print('Starting ONOS container %s' %self.name)
+            self.start(ports = self.ports, environment = self.env, 
+                       host_config = host_config, volumes = volumes, tty = True)
+            print('Waiting %d seconds for ONOS to boot' %(boot_delay))
+            time.sleep(boot_delay)
+
+class Radius(Container):
+    ports = [ 1812, 1813 ]
+    env = {'TIMEZONE':'America/Los_Angeles', 
+           'DEBUG': 'true', 'cert_password':'whatever', 'primary_shared_secret':'radius_password'
+           }
+    host_db_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'setup/radius-config/db')
+    guest_db_dir = os.path.join(os.path.sep, 'opt', 'db')
+    host_config_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'setup/radius-config/freeradius')
+    guest_config_dir = os.path.join(os.path.sep, 'etc', 'freeradius')
+    start_command = os.path.join(guest_config_dir, 'start-radius.py')
+    host_guest_map = ( (host_db_dir, guest_db_dir),
+                       (host_config_dir, guest_config_dir)
+                       )
+    def __init__(self, name = 'cord-radius', image = 'cord-test/radius', tag = 'latest',
+                 boot_delay = 10, restart = False):
+        super(Radius, self).__init__(name, image, tag = tag, command = self.start_command)
+        if not self.img_exists():
+            self.build_image(image)
+        if restart is True and self.exists():
+            self.kill()
+        if not self.exists():
+            self.remove_container(name, force=True)
+            host_config = self.create_host_config(port_list = self.ports,
+                                                  host_guest_map = self.host_guest_map)
+            volumes = []
+            for _,g in self.host_guest_map:
+                volumes.append(g)
+            self.start(ports = self.ports, environment = self.env, 
+                       volumes = volumes, 
+                       host_config = host_config, tty = True)
+            time.sleep(boot_delay)
+
+    @classmethod
+    def build_image(cls, image):
+        print('Building Radius image %s' %image)
+        dockerfile = '''
+FROM hbouvier/docker-radius
+MAINTAINER chetan@ciena.com
+LABEL RUN docker pull hbouvier/docker-radius
+LABEL RUN docker run -it --name cord-radius hbouvier/docker-radius
+RUN apt-get update
+RUN apt-get -y install python python-pexpect strace
+WORKDIR /root
+CMD ["/etc/freeradius/start-radius.py"]
+'''
+        super(Radius, cls).build_image(dockerfile, image)
+        print('Done building image %s' %image)
+
+class Quagga(Container):
+    quagga_config = ( { 'bridge' : 'quagga-br', 'ip': '10.10.0.3', 'mask' : 16 }, 
+                      { 'bridge' : 'quagga-br', 'ip': '192.168.10.3', 'mask': 16 },
+                      )
+    ports = [ 179, 2601, 2602, 2603, 2604, 2605, 2606 ]
+    host_quagga_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'setup/quagga-config')
+    guest_quagga_config = '/root/config'
+    quagga_config_file = os.path.join(guest_quagga_config, 'testrib.conf')
+    host_guest_map = ( (host_quagga_config, guest_quagga_config), )
+    
+    def __init__(self, name = 'cord-quagga', image = 'cord-test/quagga', tag = 'latest', 
+                 boot_delay = 15, restart = False, config_file = quagga_config_file):
+        super(Quagga, self).__init__(name, image, tag = tag, quagga_config = self.quagga_config)
+        if not self.img_exists():
+            self.build_image(image)
+        if restart is True and self.exists():
+            self.kill()
+        if not self.exists():
+            self.remove_container(name, force=True)
+            host_config = self.create_host_config(port_list = self.ports, 
+                                                  host_guest_map = self.host_guest_map, 
+                                                  privileged = True)
+            volumes = []
+            for _,g in self.host_guest_map:
+                volumes.append(g)
+            self.start(ports = self.ports,
+                       host_config = host_config, 
+                       volumes = volumes, tty = True)
+            print('Starting Quagga on container %s' %self.name)
+            self.execute('{0}/start.sh {1}'.format(self.guest_quagga_config, config_file))
+            time.sleep(boot_delay)
+
+    @classmethod
+    def build_image(cls, image):
+        onos_quagga_ip = Onos.quagga_config[0]['ip']
+        print('Building Quagga image %s' %image)
+        dockerfile = '''
+FROM ubuntu:latest
+WORKDIR /root
+RUN useradd -M quagga
+RUN mkdir /var/log/quagga && chown quagga:quagga /var/log/quagga
+RUN mkdir /var/run/quagga && chown quagga:quagga /var/run/quagga
+RUN apt-get update && apt-get install -qy git autoconf libtool gawk make telnet libreadline6-dev
+RUN git clone git://git.sv.gnu.org/quagga.git quagga && \
+(cd quagga && git checkout HEAD && ./bootstrap.sh && \
+sed -i -r 's,htonl.*?\(INADDR_LOOPBACK\),inet_addr\("{0}"\),g' zebra/zebra_fpm.c && \
+./configure --enable-fpm --disable-doc --localstatedir=/var/run/quagga && make && make install)
+RUN ldconfig
+'''.format(onos_quagga_ip)
+        super(Quagga, cls).build_image(dockerfile, image)
+        print('Done building image %s' %image)
+
diff --git a/src/test/utils/CordTestServer.py b/src/test/utils/CordTestServer.py
new file mode 100644
index 0000000..06ddfba
--- /dev/null
+++ b/src/test/utils/CordTestServer.py
@@ -0,0 +1,106 @@
+import SocketServer as socketserver
+import threading
+import socket
+from CordContainer import Onos, Quagga, Radius
+from nose.tools import nottest
+
+##Server to handle container restart requests from test container.
+##Used now to restart ONOS from vrouter test container
+
+CORD_TEST_HOST = '172.17.0.1'
+CORD_TEST_PORT = 25000
+
+class CordTestServer(socketserver.BaseRequestHandler):
+
+    def restart_onos(self, *args):
+        print('Restarting ONOS')
+        onos = Onos(restart = True)
+        self.request.sendall('DONE')
+
+    def restart_quagga(self, *args):
+        config_file = Quagga.quagga_config_file
+        boot_delay = 15
+        if args:
+            config_file = args[0]
+            if len(args) > 1:
+                boot_delay = int(args[1])
+        print('Restarting QUAGGA with config file %s, delay %d secs'%(config_file, boot_delay))
+        quagga = Quagga(restart = True, config_file = config_file, boot_delay = boot_delay)
+        self.request.sendall('DONE')
+
+    def restart_radius(self, *args):
+        print('Restarting RADIUS Server')
+        radius = Radius(restart = True)
+        self.request.sendall('DONE')
+
+    callback_table = { 'RESTART_ONOS' : restart_onos,
+                       'RESTART_QUAGGA' : restart_quagga,
+                       'RESTART_RADIUS' : restart_radius,
+                     }
+
+    def handle(self):
+        data = self.request.recv(1024).strip()
+        cmd = data.split()[0]
+        try:
+            #args = ' '.join(data.split()[1:])
+            args = data.split()[1:]
+        except:
+            args = None
+
+        if self.callback_table.has_key(cmd):
+            self.callback_table[cmd](self, *args)
+
+class ThreadedTestServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
+    allow_reuse_address = True
+
+@nottest
+def cord_test_server_start():
+    server = ThreadedTestServer( (CORD_TEST_HOST, CORD_TEST_PORT), CordTestServer)
+    task = threading.Thread(target = server.serve_forever)
+    ##terminate when main thread exits
+    task.daemon = True
+    task.start()
+    return server
+
+@nottest
+def cord_test_server_stop(server):
+    server.shutdown()
+    server.server_close()
+
+@nottest
+def cord_test_onos_restart():
+    '''Send ONOS restart to server'''
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.connect( (CORD_TEST_HOST, CORD_TEST_PORT) )
+    s.sendall('RESTART_ONOS\n')
+    data = s.recv(1024).strip()
+    s.close()
+    if data == 'DONE':
+        return True
+    return False
+
+@nottest
+def cord_test_quagga_restart(config_file = None, boot_delay = 30):
+    '''Send QUAGGA restart to server'''
+    if config_file is None:
+        config_file = Quagga.quagga_config_file
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.connect( (CORD_TEST_HOST, CORD_TEST_PORT) )
+    s.sendall('RESTART_QUAGGA {0} {1}\n'.format(config_file, boot_delay))
+    data = s.recv(1024).strip()
+    s.close()
+    if data == 'DONE':
+        return True
+    return False
+
+@nottest
+def cord_test_radius_restart():
+    '''Send Radius server restart to server'''
+    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    s.connect( (CORD_TEST_HOST, CORD_TEST_PORT) )
+    s.sendall('RESTART_RADIUS\n')
+    data = s.recv(1024).strip()
+    s.close()
+    if data == 'DONE':
+        return True
+    return False
diff --git a/src/test/utils/DHCP.py b/src/test/utils/DHCP.py
index cafa95e..500c918 100644
--- a/src/test/utils/DHCP.py
+++ b/src/test/utils/DHCP.py
@@ -11,12 +11,14 @@
         self.iface = iface
         self.mac_map = {}
         self.mac_inverse_map = {}
+        self.bootpmac = None
+        self.dhcpresp = None
 
     def is_mcast(self, ip):
         mcast_octet = (atol(ip) >> 24) & 0xff
         return True if mcast_octet >= 224 and mcast_octet <= 239 else False
 
-    def send(self, mac = None, update_seed = False):
+    def discover(self, mac = None, update_seed = False):
         '''Send a DHCP discover/offer'''
 
         if mac is None:
@@ -25,14 +27,16 @@
                 self.seed_ip = self.incIP(self.seed_ip)
                 self.seed_mac = self.ipToMac(self.seed_ip)
                 mac = self.seed_mac
-                
+
         chmac = self.macToChaddr(mac)
+        self.bootpmac = chmac
         L2 = Ether(dst="ff:ff:ff:ff:ff:ff", src=mac)
         L3 = IP(src="0.0.0.0", dst="255.255.255.255")
         L4 = UDP(sport=68, dport=67)
         L5 = BOOTP(chaddr=chmac)
         L6 = DHCP(options=[("message-type","discover"),"end"])
-        resp = srp1(L2/L3/L4/L5/L6, filter="udp and port 68", timeout=5, iface=self.iface)
+        resp = srp1(L2/L3/L4/L5/L6, filter="udp and port 68", timeout=10, iface=self.iface)
+        self.dhcpresp = resp
         try:
             srcIP = resp.yiaddr
             serverIP = resp.siaddr
@@ -40,6 +44,7 @@
             print("Failed to acquire IP via DHCP for %s on interface %s" %(mac, self.iface))
             return (None, None)
 
+        subnet_mask = "0.0.0.0"
         for x in resp.lastlayer().options:
             if(x == 'end'):
                 break
@@ -48,19 +53,144 @@
                 subnet_mask = val
             elif(op == 'server_id'):
                 server_id = val
-            
+
         L5 = BOOTP(chaddr=chmac, yiaddr=srcIP)
-        L6 = DHCP(options=[("message-type","request"), ("server_id",server_id), 
+        L6 = DHCP(options=[("message-type","request"), ("server_id",server_id),
                            ("subnet_mask",subnet_mask), ("requested_addr",srcIP), "end"])
-        srp1(L2/L3/L4/L5/L6, filter="udp and port 68", timeout=5, iface=self.iface)
+        srp(L2/L3/L4/L5/L6, filter="udp and port 68", timeout=10, iface=self.iface)
         self.mac_map[mac] = (srcIP, serverIP)
         self.mac_inverse_map[srcIP] = (mac, serverIP)
         return (srcIP, serverIP)
 
-    def send_next(self):
-        '''Send next dhcp discover/request with updated mac'''
+    def only_discover(self, mac = None, desired = False, lease_time = False):
+        '''Send a DHCP discover'''
 
-        return self.send(update_seed = True)
+        if mac is None:
+            mac = self.seed_mac
+
+        chmac = self.macToChaddr(mac)
+        self.bootpmac = chmac
+        L2 = Ether(dst="ff:ff:ff:ff:ff:ff", src=mac)
+        L3 = IP(src="0.0.0.0", dst="255.255.255.255")
+        L4 = UDP(sport=68, dport=67)
+        L5 = BOOTP(chaddr=chmac)
+        if desired:
+            L6 = DHCP(options=[("message-type","discover"),("requested_addr",self.seed_ip),"end"])
+        elif lease_time:
+            L6 = DHCP(options=[("message-type","discover"),("lease_time",700),"end"])
+        else:
+            L6 = DHCP(options=[("message-type","discover"),"end"])
+
+        resp = srp1(L2/L3/L4/L5/L6, filter="udp and port 68", timeout=10, iface=self.iface)
+        if resp == None:
+            return (None, None, None)
+
+        self.dhcpresp = resp
+        for x in resp.lastlayer().options:
+            if(x == 'end'):
+                break
+            op,val = x
+            if(op == "message-type"):
+                if(val == 2):
+                    #Received a DHCP offer
+                    try:
+                        srcIP = resp.yiaddr
+                        serverIP = resp.siaddr
+                    except AttributeError:
+                        print "In Attribute error."
+                        print("Failed to acquire IP via DHCP for %s on interface %s" %(mac, self.iface))
+                        return (None, None, None)
+                    if lease_time == True:
+                        for x in resp.lastlayer().options:
+                            if(x == 'end'):
+                                break
+                            op,val = x
+                            if(op == "lease_time"):
+                                return (srcIP, serverIP, mac, val)
+                    else:
+                        return (srcIP, serverIP, mac)
+                elif(val == 6):
+                    #Received a DHCP NAK
+                    return (None, None, mac)
+
+    def only_request(self, cip, mac):
+        '''Send a DHCP request for the offered address'''
+        subnet_mask = "0.0.0.0"
+        for x in self.dhcpresp.lastlayer().options:
+            if(x == 'end'):
+                break
+            op,val = x
+            if(op == "subnet_mask"):
+                subnet_mask = val
+            elif(op == 'server_id'):
+                server_id = val
+
+        L2 = Ether(dst="ff:ff:ff:ff:ff:ff", src=mac)
+        L3 = IP(src="0.0.0.0", dst="255.255.255.255")
+        L4 = UDP(sport=68, dport=67)
+        L5 = BOOTP(chaddr=self.bootpmac, yiaddr=cip)
+        L6 = DHCP(options=[("message-type","request"), ("server_id",server_id),
+                           ("subnet_mask",subnet_mask), ("requested_addr",cip), "end"])
+        resp = srp1(L2/L3/L4/L5/L6, filter="udp and port 68", timeout=10, iface=self.iface)
+        if resp == None:
+            return (None, None)
+
+        for x in resp.lastlayer().options:
+            if(x == 'end'):
+                break
+            op,val = x
+            if(op == "message-type"):
+                if(val == 5):
+                    #Received a DHCP ACK
+                    try:
+                        srcIP = resp.yiaddr
+                        serverIP = resp.siaddr
+                    except AttributeError:
+                        print "In Attribute error."
+                        print("Failed to acquire IP via DHCP for %s on interface %s" %(mac, self.iface))
+                        return (None, None)
+                    self.mac_map[mac] = (srcIP, serverIP)
+                    self.mac_inverse_map[srcIP] = (mac, serverIP)
+                    return (srcIP, serverIP)
+                elif(val == 6):
+                    #Received a DHCP NAK
+                    return (None, None)
+
+    def discover_next(self):
+        '''Send next dhcp discover/request with updated mac'''
+        return self.discover(update_seed = True)
+
+    def release(self, ip):
+        '''Send a DHCP release for a previously leased address'''
+        if ip is None:
+            return False
+        if not self.mac_inverse_map.has_key(ip):
+            return False
+        mac, server_ip = self.mac_inverse_map[ip]
+        chmac = self.macToChaddr(mac)
+        L2 = Ether(dst="ff:ff:ff:ff:ff:ff", src=mac)
+        L3 = IP(src="0.0.0.0", dst="255.255.255.255")
+        L4 = UDP(sport=68, dport=67)
+        L5 = BOOTP(chaddr=chmac, ciaddr = ip)
+        L6 = DHCP(options=[("message-type","release"), ("server_id", server_ip), "end"])
+        sendp(L2/L3/L4/L5/L6, iface = self.iface)
+        del self.mac_map[mac]
+        del self.mac_inverse_map[ip]
+        return True
 
     def macToChaddr(self, mac):
         rv = []
@@ -96,14 +226,14 @@
     def incIP(self, ip, n=1):
         '''Increment an IP'''
 
-        if n < 1: 
+        if n < 1:
             return ip
         o = ip.split(".")
         for ii in range(3,-1,-1):
             if int(o[ii]) < 255:
                 o[ii] = str(int(o[ii]) + 1)
                 break
-            else: 
+            else:
                 o[ii] = str(0)
 
         n -= 1
diff --git a/src/test/utils/EapMD5.py b/src/test/utils/EapMD5.py
new file mode 100644
index 0000000..61ca405
--- /dev/null
+++ b/src/test/utils/EapMD5.py
@@ -0,0 +1,96 @@
+import sys, os
+from EapolAAA import *
+from enum import *
+import noseMd5AuthHolder as md5AuthHolder
+from socket import *
+from struct import *
+from md5 import md5
+from scapy.all import *
+from nose.tools import *
+from CordTestBase import CordTester
+
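+# MD5AuthTest drives an EAP-MD5 authentication exchange over EAPOL as a small state machine;
+# the state/event tables below feed the FSM table produced by noseMd5AuthHolder.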
+class MD5AuthTest(EapolPacket, CordTester):
+
+    md5StateTable = Enumeration("MD5StateTable", ("ST_EAP_SETUP",
+                                                  "ST_EAP_START",
+                                                  "ST_EAP_ID_REQ",
+                                                  "ST_EAP_MD5_CHALLENGE",
+                                                  "ST_EAP_STATUS",
+                                                  "ST_EAP_MD5_DONE"
+                                                  )
+                                )
+    md5EventTable = Enumeration("MD5EventTable", ("EVT_EAP_SETUP",
+                                                  "EVT_EAP_START",
+                                                  "EVT_EAP_ID_REQ",
+                                                  "EVT_EAP_MD5_CHALLENGE",
+                                                  "EVT_EAP_STATUS",
+                                                  "EVT_EAP_MD5_DONE"
+                                                  )
+                                )
+    def __init__(self, intf = 'veth0', password = "password", required_status = "EAP_SUCCESS"):
+        self.passwd = password
+        self.req_status = required_status
+        self.fsmTable = md5AuthHolder.initMd5AuthHolderFsmTable(self, self.md5StateTable, self.md5EventTable)
+        EapolPacket.__init__(self, intf)
+        CordTester.__init__(self, self.fsmTable, self.md5StateTable.ST_EAP_MD5_DONE)
+        self.currentState = self.md5StateTable.ST_EAP_SETUP
+        self.currentEvent = self.md5EventTable.EVT_EAP_SETUP
+        self.nextState = None
+        self.nextEvent = None
+
+    def _eapSetup(self):
+        print 'Inside EAP Setup'
+        self.setup()
+        self.nextEvent = self.md5EventTable.EVT_EAP_START
+        
+    def _eapStart(self):
+        print 'Inside EAP Start'
+        self.eapol_start()
+        self.nextEvent = self.md5EventTable.EVT_EAP_ID_REQ
+
+    def _eapIdReq(self):
+        print 'Inside EAP ID Req'
+        p = self.eapol_recv()
+        code, pkt_id, eaplen = unpack("!BBH", p[0:4])
+        print "Code %d, id %d, len %d" %(code, pkt_id, eaplen)
+        assert_equal(code, EAP_REQUEST)
+        reqtype = unpack("!B", p[4:5])[0]
+        reqdata = p[5:4+eaplen]
+        assert_equal(reqtype, EAP_TYPE_ID)
+        print "<====== Send EAP Response with identity = %s ================>" % USER
+        self.eapol_id_req(pkt_id, USER)
+        self.nextEvent = self.md5EventTable.EVT_EAP_MD5_CHALLENGE
+
+    def _eapMd5Challenge(self):
+        print 'Inside EAP MD5 Challenge Exchange'
+        challenge, pkt_id = self.eap_md5_challenge_recv(self.passwd)
+        resp = md5(challenge).digest()
+        resp = chr(len(resp)) + resp
+        length = 5 + len(resp)
+        print "Generated MD5 challenge response %r, length: %d" % (resp, length)
+        print "--> Send EAP response with MD5 challenge"
+        eap_payload = self.eap(EAP_RESPONSE, pkt_id, EAP_TYPE_MD5, str(resp))
+        self.eapol_send(EAPOL_EAPPACKET, eap_payload)
+        self.nextEvent = self.md5EventTable.EVT_EAP_STATUS
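+        ##The value hashed above is whatever eap_md5_challenge_recv() derives from the
+        ##password and the received challenge (assumed to follow the usual EAP-MD5/CHAP
+        ##scheme of identifier + password + challenge); the digest is prefixed with its
+        ##own length byte before being wrapped into the EAP response.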
+
+    def _eapStatus(self):
+        print 'Inside EAP Status -- Success/Failure'
+        if self.req_status == "EAP_SUCCESS":
+            status = self.eap_Status()
+            print "<============ EAP code received is = %d ====================>" % status
+            assert_equal(status, EAP_SUCCESS)
+            print "Received EAP SUCCESS"
+        else:
+            print 'Negative test case: EAP SUCCESS should not be received'
+            self.s.settimeout(10)
+            assert_equal(self.s.gettimeout(), 10)
+            print "Check if the socket times out, since ONOS should not send an EAP FAILURE message for this negative test case"
+            assert_raises(socket.error, self.s.recv, 1024)
+        self.nextEvent = self.md5EventTable.EVT_EAP_MD5_DONE
+
+    def _wrong_password(self):
+        print 'Start test case for EAP-MD5 with a wrong password'
+        #self._eap_md5_states()
+        self.__init__(intf = 'veth0', password = "wrong_password", required_status = "EAP_FAILURE")
+
+
diff --git a/src/test/utils/EapPAP.py b/src/test/utils/EapPAP.py
index a40ae9d..a128f05 100644
--- a/src/test/utils/EapPAP.py
+++ b/src/test/utils/EapPAP.py
@@ -1,17 +1,15 @@
 import sys, os
-cord_root = os.getenv('CORD_TEST_ROOT') or './'
-CORD_TEST_FSM = 'fsm'
-sys.path.append(cord_root + CORD_TEST_FSM)
 from EapolAAA import *
 from enum import *
 import nosePAPAuthHolder as PAPAuthHolder
 from socket import *
 from struct import *
-import scapy
+from scapy.all import *
 from nose.tools import *
 from CordTestBase import CordTester
 PAP_USER = "raduser"
 PAP_PASSWD = "radpass"
+log.setLevel('INFO')
 
 class PAPAuthTest(EapolPacket, CordTester):
 
@@ -52,37 +50,37 @@
         self.nextEvent = self.PAPEventTable.EVT_EAP_ID_REQ
 
     def _eapIdReq(self):
-        print 'Inside EAP ID Req'
-        p = self.eapol_recv()
-        code, pkt_id, eaplen = unpack("!BBH", p[0:4])
-        print "Code %d, id %d, len %d" %(code, pkt_id, eaplen)
-        assert_equal(code, EAP_REQUEST)
-        reqtype = unpack("!B", p[4:5])[0]
-        reqdata = p[5:4+eaplen]
-        assert_equal(reqtype, EAP_TYPE_ID)
-        print "<====== Send EAP Response with identity = %s ================>" % PAP_USER
-        self.eapol_id_req(pkt_id, PAP_USER)
+        log.info( 'Inside EAP ID Req' )
+        def eapol_cb(pkt):
+                log.info('Got EAPOL packet with type id and code request')
+                log.info('Packet code: %d, type: %d, id: %s', pkt[EAP].code, pkt[EAP].type, pkt[EAP].id)
+                log.info("<====== Send EAP Response with identity = %s ================>" % PAP_USER)
+                self.eapol_id_req(pkt[EAP].id, PAP_USER)
+
+        self.eapol_scapy_recv(cb = eapol_cb,
+                              lfilter = lambda pkt: pkt[EAP].type == EAP.TYPE_ID and pkt[EAP].code == EAP.REQUEST)
         self.nextEvent = self.PAPEventTable.EVT_EAP_PAP_USER_REQ
 
     def _eapPAPUserReq(self):
-        print 'UserReq Inside Challenge'
-        p = self.eapol_recv()
-        code, pkt_id, eaplen = unpack("!BBH", p[0:4])
-        print "Code %d, id %d, len %d" %(code, pkt_id, eaplen)
-        assert_equal(code, EAP_REQUEST)
-        reqtype = unpack("!B", p[4:5])[0]
-        reqdata = p[5:4+eaplen]
-        assert_equal(reqtype, EAP_TYPE_TLS)
-        print "<====== Send EAP Response with Password = %s ================>" % PAP_PASSWD 
-        self.eapol_id_req(pkt_id, PAP_PASSWD)
+        log.info('UserReq Inside Challenge')
+        def eapol_cb(pkt):
+                log.info('Got EAPOL packet with code request')
+                log.info('Packet code: %d, id: %s', pkt[EAP].code, pkt[EAP].id)
+                log.info('Send EAP Response for id %s with Password = %s' %(pkt[EAP].id, PAP_PASSWD) )
+                self.eapol_id_req(pkt[EAP].id, PAP_PASSWD)
+
+        self.eapol_scapy_recv(cb = eapol_cb,
+                              lfilter = lambda pkt: pkt[EAP].type == EAP_TYPE_TLS and pkt[EAP].code == EAP.REQUEST)
         #self.nextEvent = self.PAPEventTable.EVT_EAP_PAP_PASSWD_REQ
-        self.nextEvent = self.PAPEventTable.EVT_EAP_PAP_DONE
- 
+        self.nextEvent = None
+
     def _eapPAPPassReq(self):
-        print 'PassReq Inside Challenge'
-        p = self.eapol_recv()
-        code, pkt_id, eaplen = unpack("!BBH", p[0:4])
-        print "Code %d, id %d, len %d" %(code, pkt_id, eaplen)
-        assert_equal(code, EAP_SUCCESS)
+        log.info('PassReq Inside Challenge')
+        def eapol_cb(pkt):
+                log.info('Got EAPOL packet with code success')
+                log.info('Packet code: %d, type: %d', pkt[EAP].code, pkt[EAP].type)
+
+        self.eapol_scapy_recv(cb = eapol_cb,
+                              lfilter = lambda pkt: pkt[EAP].code == EAP.SUCCESS)
         self.nextEvent = self.PAPEventTable.EVT_EAP_PAP_DONE
  
diff --git a/src/test/utils/EapTLS.py b/src/test/utils/EapTLS.py
index d6b6b7e..f3cb66b 100644
--- a/src/test/utils/EapTLS.py
+++ b/src/test/utils/EapTLS.py
@@ -1,7 +1,4 @@
 import sys, os
-cord_root = os.getenv('CORD_TEST_ROOT') or './'
-CORD_TEST_FSM = 'fsm'
-sys.path.append(cord_root + CORD_TEST_FSM)
 from EapolAAA import *
 from enum import *
 import noseTlsAuthHolder as tlsAuthHolder
@@ -12,7 +9,7 @@
 from nose.tools import *
 from CordTestBase import CordTester
 import re
-
+log.setLevel('INFO')
 class TLSAuthTest(EapolPacket, CordTester):
 
     tlsStateTable = Enumeration("TLSStateTable", ("ST_EAP_SETUP",
@@ -20,6 +17,8 @@
                                                   "ST_EAP_ID_REQ",
                                                   "ST_EAP_TLS_HELLO_REQ",
                                                   "ST_EAP_TLS_CERT_REQ",
+                                                  "ST_EAP_TLS_CHANGE_CIPHER_SPEC",
+                                                  "ST_EAP_TLS_FINISHED",
                                                   "ST_EAP_TLS_DONE"
                                                   )
                                 )
@@ -28,6 +27,8 @@
                                                   "EVT_EAP_ID_REQ",
                                                   "EVT_EAP_TLS_HELLO_REQ",
                                                   "EVT_EAP_TLS_CERT_REQ",
+                                                  "EVT_EAP_TLS_CHANGE_CIPHER_SPEC",
+                                                  "EVT_EAP_TLS_FINISHED",
                                                   "EVT_EAP_TLS_DONE"
                                                   )
                                 )
@@ -42,37 +43,30 @@
         self.nextEvent = None
 
     def _eapSetup(self):
-        print 'Inside EAP Setup'
         self.setup()
         self.nextEvent = self.tlsEventTable.EVT_EAP_START
         
     def _eapStart(self):
-        print 'Inside EAP Start'
         self.eapol_start()
         self.nextEvent = self.tlsEventTable.EVT_EAP_ID_REQ
 
     def _eapIdReq(self):
-        print 'Inside EAP ID Req'
-        p = self.eapol_recv()
-        code, pkt_id, eaplen = unpack("!BBH", p[0:4])
-        print "Code %d, id %d, len %d" %(code, pkt_id, eaplen)
-        assert_equal(code, EAP_REQUEST)
-        reqtype = unpack("!B", p[4:5])[0]
-        reqdata = p[5:4+eaplen]
-        assert_equal(reqtype, EAP_TYPE_ID)
-        print "<====== Send EAP Response with identity = %s ================>" % USER
-        self.eapol_id_req(pkt_id, USER)
+        log.info( 'Inside EAP ID Req' )
+        def eapol_cb(pkt):
+                log.info('Got EAPOL packet with type id and code request')
+                log.info('Packet code: %d, type: %d, id: %d', pkt[EAP].code, pkt[EAP].type, pkt[EAP].id)
+                log.info("<====== Send EAP Response with identity = %s ================>" % USER)
+                self.eapol_id_req(pkt[EAP].id, USER)
+
+        self.eapol_scapy_recv(cb = eapol_cb,
+                              lfilter = lambda pkt: pkt[EAP].type == EAP.TYPE_ID and pkt[EAP].code == EAP.REQUEST)
         self.nextEvent = self.tlsEventTable.EVT_EAP_TLS_HELLO_REQ
 
     def _eapTlsHelloReq(self):
-        print 'Inside EAP TLS Hello Req'
-        p = self.eapol_recv()
-        code, pkt_id, eaplen = unpack("!BBH", p[0:4])
-        print "Code %d, id %d, len %d" %(code, pkt_id, eaplen)
-        assert_equal(code, EAP_REQUEST)
-        reqtype = unpack("!B", p[4:5])[0]
-        assert_equal(reqtype, EAP_TYPE_TLS)
-        reqdata = TLSRecord(version="TLS_1_0")/TLSHandshake()/TLSClientHello(version="TLS_1_0",
+
+        def eapol_cb(pkt):
+                log.info('Got hello request for id %d', pkt[EAP].id)
+                reqdata = TLSRecord(version="TLS_1_0")/TLSHandshake()/TLSClientHello(version="TLS_1_0",
                                                                              gmt_unix_time=1234,
                                                                              random_bytes="A" * 28,
                                                                              session_id='',
@@ -80,85 +74,71 @@
                                                                              cipher_suites=[TLSCipherSuite.RSA_WITH_AES_128_CBC_SHA]
                                                                              )
 
-        #reqdata.show()
-        print "------> Sending Client Hello TLS payload of len %d ----------->" %len(reqdata)
-        eap_payload = self.eapTLS(EAP_RESPONSE, pkt_id, TLS_LENGTH_INCLUDED, str(reqdata))
-        self.eapol_send(EAPOL_EAPPACKET, eap_payload)
+                #reqdata.show()
+                log.debug("Sending Client Hello TLS payload of len %d, id %d" %(len(reqdata),pkt[EAP].id))
+                eap_payload = self.eapTLS(EAP_RESPONSE, pkt[EAP].id, TLS_LENGTH_INCLUDED, str(reqdata))
+                self.eapol_send(EAPOL_EAPPACKET, eap_payload)
+
+        self.eapol_scapy_recv(cb = eapol_cb,
+                              lfilter = lambda pkt: pkt[EAP].type == EAP_TYPE_TLS and pkt[EAP].code == EAP.REQUEST)
         self.nextEvent = self.tlsEventTable.EVT_EAP_TLS_CERT_REQ
 
     def _eapTlsCertReq(self):
-        print 'Inside EAP TLS Cert Req'
-        p = self.eapol_recv()
-        print 'Got TLS Cert Req with payload len: %d' %len(p)
-        code, pkt_id, eaplen = unpack("!BBH", p[0:4])
-        print "Code %d, id %d, len %d" %(code, pkt_id, eaplen)
-        assert_equal(code, EAP_REQUEST)
-        reqtype = unpack("!B", p[4:5])[0]
-        assert_equal(reqtype, EAP_TYPE_TLS)
-        rex_pem = re.compile(r'\-+BEGIN[^\-]+\-+(.*?)\-+END[^\-]+\-+', re.DOTALL)
-        self.pem_cert="""-----BEGIN CERTIFICATE-----
-MIIE4TCCA8mgAwIBAgIJANhJTS6x4B0iMA0GCSqGSIb3DQEBCwUAMIGTMQswCQYD
-VQQGEwJGUjEPMA0GA1UECBMGUmFkaXVzMRIwEAYDVQQHEwlTb21ld2hlcmUxFTAT
-BgNVBAoTDEV4YW1wbGUgSW5jLjEgMB4GCSqGSIb3DQEJARYRYWRtaW5AZXhhbXBs
-ZS5jb20xJjAkBgNVBAMTHUV4YW1wbGUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4X
-DTE0MDUyMDExNTkzNloXDTE0MDcxOTExNTkzNlowgZMxCzAJBgNVBAYTAkZSMQ8w
-DQYDVQQIEwZSYWRpdXMxEjAQBgNVBAcTCVNvbWV3aGVyZTEVMBMGA1UEChMMRXhh
-bXBsZSBJbmMuMSAwHgYJKoZIhvcNAQkBFhFhZG1pbkBleGFtcGxlLmNvbTEmMCQG
-A1UEAxMdRXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQC/KUyltP0BS5A/sYg/XJOZMSHDIUiW+D8s1JgJ
-9Q/FIAnlMpevjPQtlmWi+hpgOUGgTryV+rTlzcUNw/gjmMs1Z4bAakFIc2vCPybw
-5hgKMU2E9SMgLr1aMVzwN3BH/njt1eWQ5Q9ajyu3JzmXZwOg/tV03L7BYpjLajhT
-iln4pvO/nq9YHVGurE5qCwyyrleYmtEXPi8MxrgudaKShrr7KgXbhlSwEaGGapSD
-JFhKvyQ4UZ56qiDFXD/AIXE9o8Soouv+8ufsCOyf/xKp1QkUaZ17Fe6YHqvQYdNM
-ovwnXnX+vRW0cZVui7ufxHncb9sJSAlovxzDy/GeL0SHtdH9AgMBAAGjggE0MIIB
-MDAdBgNVHQ4EFgQUHjtJ/Mjl+dcwmT5UI37N74qh2YUwgcgGA1UdIwSBwDCBvYAU
-HjtJ/Mjl+dcwmT5UI37N74qh2YWhgZmkgZYwgZMxCzAJBgNVBAYTAkZSMQ8wDQYD
-VQQIEwZSYWRpdXMxEjAQBgNVBAcTCVNvbWV3aGVyZTEVMBMGA1UEChMMRXhhbXBs
-ZSBJbmMuMSAwHgYJKoZIhvcNAQkBFhFhZG1pbkBleGFtcGxlLmNvbTEmMCQGA1UE
-AxMdRXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCCQDYSU0useAdIjAMBgNV
-HRMEBTADAQH/MDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly93d3cuZXhhbXBsZS5j
-b20vZXhhbXBsZV9jYS5jcmwwDQYJKoZIhvcNAQELBQADggEBAEbkq17kbT7X/oiy
-E2DOV7g1W8Au+TQR0GKzjXPgmYVGixN8l/9dQZ9WVDmCetBy71UHgTxPp20My2zr
-uA7hy9FYNGtZ2jmu0p019fH+CSCL7RHHgKsY63UsldT1qPYiWiyqbWy5GvJX778N
-GxVo7oN33se1c4KEmMOLVqQqX5dDWjN2r27l0GFh1ssx4RHqOc57G5Txq861i6UT
-KlrN0xpyu7LjcQGMwKbfzCXfwys5i4rrAVX1spILTIihUKpD6FYxp6oj+d4ELZOh
-br3zfhKrkbvPCG0gEziBLnwd11ZJELQfm89IYBhmoOYkk5+ZOszDsXKGWzfV6XSW
-ZRv+LU0=
+
+        def eapol_cb(pkt):
+                log.info('Got cert request')
+                rex_pem = re.compile(r'\-+BEGIN[^\-]+\-+(.*?)\-+END[^\-]+\-+', re.DOTALL)
+                self.pem_cert = """-----BEGIN CERTIFICATE-----
+MIIDvTCCAqWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBizELMAkGA1UEBhMCVVMx
+CzAJBgNVBAgTAkNBMRIwEAYDVQQHEwlTb21ld2hlcmUxEzARBgNVBAoTCkNpZW5h
+IEluYy4xHjAcBgkqhkiG9w0BCQEWD2FkbWluQGNpZW5hLmNvbTEmMCQGA1UEAxMd
+RXhhbXBsZSBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMTYwMzExMTg1MzM2WhcN
+MTcwMzA2MTg1MzM2WjBnMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEzARBgNV
+BAoTCkNpZW5hIEluYy4xFzAVBgNVBAMUDnVzZXJAY2llbmEuY29tMR0wGwYJKoZI
+hvcNAQkBFg51c2VyQGNpZW5hLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC
+AQoCggEBAOxemcBsPn9tZsCa5o2JA6sQDC7A6JgCNXXl2VFzKLNNvB9PS6D7ZBsQ
+5An0zEDMNzi51q7lnrYg1XyiE4S8FzMGAFr94RlGMQJUbRD9V/oqszMX4k++iAOK
+tIA1gr3x7Zi+0tkjVSVzXTmgNnhChAamdMsjYUG5+CY9WAicXyy+VEV3zTphZZDR
+OjcjEp4m/TSXVPYPgYDXI40YZKX5BdvqykWtT/tIgZb48RS1NPyN/XkCYzl3bv21
+qx7Mc0fcEbsJBIIRYTUkfxnsilcnmLxSYO+p+DZ9uBLBzcQt+4Rd5pLSfi21WM39
+2Z2oOi3vs/OYAPAqgmi2JWOv3mePa/8CAwEAAaNPME0wEwYDVR0lBAwwCgYIKwYB
+BQUHAwIwNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL3d3dy5leGFtcGxlLmNvbS9l
+eGFtcGxlX2NhLmNybDANBgkqhkiG9w0BAQUFAAOCAQEALBzMPDTIB6sLyPl0T6JV
+MjOkyldAVhXWiQsTjaGQGJUUe1cmUJyZbUZEc13MygXMPOM4x7z6VpXGuq1c/Vxn
+VzQ2fNnbJcIAHi/7G8W5/SQfPesIVDsHTEc4ZspPi5jlS/MVX3HOC+BDbOjdbwqP
+RX0JEr+uOyhjO+lRxG8ilMRACoBUbw1eDuVDoEBgErSUC44pq5ioDw2xelc+Y6hQ
+dmtYwfY0DbvwxHtA495frLyPcastDiT/zre7NL51MyUDPjjYjghNQEwvu66IKbQ3
+T1tJBrgI7/WI+dqhKBFolKGKTDWIHsZXQvZ1snGu/FRYzg1l+R/jT8cRB9BDwhUt
+yg==
 -----END CERTIFICATE-----"""
-        self.der_cert = rex_pem.findall(self.pem_cert)[0].decode("base64")
-        self.pem_priv_key = """-----BEGIN RSA PRIVATE KEY-----
-MIIEpAIBAAKCAQEA84TzkjbcskbKZnrlKcXzSSgi07n+4N7kOM7uIhzpkTuU0HIv
-h4VZS2axxfV6hV3CD9MuKVg2zEhroqK1Js5n4ke230nSP/qiELfCl0R+hzRtbfKL
-tFUr1iHeU0uQ6v3q+Tg1K/Tmmg72uxKrhyHDL7z0BriPjhAHJ5XlQsvR1RCMkqzu
-D9wjSInJxpMMIgLndOclAKv4D1wQtYU7ZpTw+01XBlUhIiXb86qpYL9NqnnRq5JI
-uhmOEuxo2ca63+xaHNhD/udSyc8C0Md/yX6wlONTRFgLLv0pdLUGm1xEjfsydaQ6
-qGd7hzIKUI3hohNKJa/mHLElv7SZolPTogK/EQIDAQABAoIBAADq9FwNtuE5IRQn
-zGtO4q7Y5uCzZ8GDNYr9RKp+P2cbuWDbvVAecYq2NV9QoIiWJOAYZKklOvekIju3
-r0UZLA0PRiIrTg6NrESx3JrjWDK8QNlUO7CPTZ39/K+FrmMkV9lem9yxjJjyC34D
-AQB+YRTx+l14HppjdxNwHjAVQpIx/uO2F5xAMuk32+3K+pq9CZUtrofe1q4Agj9R
-5s8mSy9pbRo9kW9wl5xdEotz1LivFOEiqPUJTUq5J5PeMKao3vdK726XI4Z455Nm
-W2/MA0YV0ug2FYinHcZdvKM6dimH8GLfa3X8xKRfzjGjTiMSwsdjgMa4awY3tEHH
-674jhAECgYEA/zqMrc0zsbNk83sjgaYIug5kzEpN4ic020rSZsmQxSCerJTgNhmg
-utKSCt0Re09Jt3LqG48msahX8ycqDsHNvlEGPQSbMu9IYeO3Wr3fAm75GEtFWePY
-BhM73I7gkRt4s8bUiUepMG/wY45c5tRF23xi8foReHFFe9MDzh8fJFECgYEA9EFX
-4qAik1pOJGNei9BMwmx0I0gfVEIgu0tzeVqT45vcxbxr7RkTEaDoAG6PlbWP6D9a
-WQNLp4gsgRM90ZXOJ4up5DsAWDluvaF4/omabMA+MJJ5kGZ0gCj5rbZbKqUws7x8
-bp+6iBfUPJUbcqNqFmi/08Yt7vrDnMnyMw2A/sECgYEAiiuRMxnuzVm34hQcsbhH
-6ymVqf7j0PW2qK0F4H1ocT9qhzWFd+RB3kHWrCjnqODQoI6GbGr/4JepHUpre1ex
-4UEN5oSS3G0ru0rC3U4C59dZ5KwDHFm7ffZ1pr52ljfQDUsrjjIMRtuiwNK2OoRa
-WSsqiaL+SDzSB+nBmpnAizECgYBdt/y6rerWUx4MhDwwtTnel7JwHyo2MDFS6/5g
-n8qC2Lj6/fMDRE22w+CA2esp7EJNQJGv+b27iFpbJEDh+/Lf5YzIT4MwVskQ5bYB
-JFcmRxUVmf4e09D7o705U/DjCgMH09iCsbLmqQ38ONIRSHZaJtMDtNTHD1yi+jF+
-OT43gQKBgQC/2OHZoko6iRlNOAQ/tMVFNq7fL81GivoQ9F1U0Qr+DH3ZfaH8eIkX
-xT0ToMPJUzWAn8pZv0snA0um6SIgvkCuxO84OkANCVbttzXImIsL7pFzfcwV/ERK
-UM6j0ZuSMFOCr/lGPAoOQU0fskidGEHi1/kW+suSr28TqsyYZpwBDQ==
------END RSA PRIVATE KEY-----
-        """
-        self.der_priv_key = rex_pem.findall(self.pem_priv_key)[0].decode("base64")
-        reqdata = TLSRecord(version="TLS_1_0")/TLSHandshake()/TLSCertificateList(
-            certificates=[TLSCertificate(data=x509.X509Cert(self.der_cert))])
-        #reqdata.show()
-        print "------> Sending Client Hello TLS Certificate payload of len %d ----------->" %len(reqdata)
-        eap_payload = self.eapTLS(EAP_RESPONSE, pkt_id, TLS_LENGTH_INCLUDED, str(reqdata))
-        self.eapol_send(EAPOL_EAPPACKET, eap_payload)
+                self.der_cert = rex_pem.findall(self.pem_cert)[0].decode("base64")
+                reqdata = TLSRecord(version="TLS_1_0")/TLSHandshake()/TLSCertificateList(
+                    certificates=[TLSCertificate(data=x509.X509Cert(self.der_cert))])
+                #reqdata.show()
+                log.info("------> Sending Client Hello TLS Certificate payload of len %d ----------->" %len(reqdata))
+                eap_payload = self.eapTLS(EAP_RESPONSE, pkt[EAP].id, TLS_LENGTH_INCLUDED, str(reqdata))
+                self.eapol_send(EAPOL_EAPPACKET, eap_payload)
+
+        self.eapol_scapy_recv(cb = eapol_cb,
+                              lfilter = lambda pkt: pkt[EAP].type == EAP_TYPE_TLS and pkt[EAP].code == EAP.REQUEST)
+        self.nextEvent = self.tlsEventTable.EVT_EAP_TLS_CHANGE_CIPHER_SPEC
+
+    def _eapTlsChangeCipherSpec(self):
+        def eapol_cb(pkt):
+                log.info('Got change cipher request')
+                reqdata = TLSFinished(data="")
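+                ##An empty TLSFinished record is sent here rather than a fully built
+                ##Finished message; presumably this suffices because the test treats
+                ##certificate validation success as authentication success (see the
+                ##note at the end of this exchange).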
+                eap_payload = self.eapTLS(EAP_RESPONSE, pkt[EAP].id, TLS_LENGTH_INCLUDED, str(reqdata))
+                self.eapol_send(EAPOL_EAPPACKET, eap_payload)
+
+        self.eapol_scapy_recv(cb = eapol_cb,
+                              lfilter = lambda pkt: pkt[EAP].type == EAP_TYPE_TLS and pkt[EAP].code == EAP.REQUEST)
+        self.nextEvent = self.tlsEventTable.EVT_EAP_TLS_FINISHED
+
+    def _eapTlsFinished(self):
+        def eapol_cb(pkt):
+                log.info('Got tls finished request')
+
+        self.eapol_scapy_recv(cb = eapol_cb,
+                              lfilter = lambda pkt: pkt[EAP].type == EAP_TYPE_TLS and pkt[EAP].code == EAP.REQUEST)
+        #We stop here as certificate validation success implies auth success
         self.nextEvent = None
diff --git a/src/test/utils/EapolAAA.py b/src/test/utils/EapolAAA.py
index 6c477f0..31be259 100644
--- a/src/test/utils/EapolAAA.py
+++ b/src/test/utils/EapolAAA.py
@@ -43,6 +43,7 @@
         self.s.bind((self.intf, ETHERTYPE_PAE))
         self.mymac = self.s.getsockname()[4]
         self.llheader = Ether(dst = PAE_GROUP_ADDR, src = self.mymac, type = ETHERTYPE_PAE)
+        self.recv_sock = L2Socket(iface = self.intf, type = ETHERTYPE_PAE)
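+        ##Dedicated scapy L2 socket for EAPOL frames, used by eapol_scapy_recv below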
 
     def cleanup(self):
         if self.s is not None:
@@ -76,6 +77,12 @@
         assert_equal(pkt_type, EAPOL_EAPPACKET)
         return p[4:]
 
+    def eapol_scapy_recv(self, cb = None, lfilter = None, count = 1):
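+        '''Sniff count EAPOL frames on the dedicated receive socket and hand each one to cb; lfilter can narrow the match'''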
+        def eapol_default_cb(pkt): pass
+        if cb is None:
+            cb = eapol_default_cb
+        sniff(prn = cb, lfilter = lfilter, count = count, opened_socket = self.recv_sock)
+
     def eapol_start(self):
         eap_payload = self.eap(EAPOL_START, 2)
         return self.eapol_send(EAPOL_START, eap_payload)
diff --git a/src/test/utils/McastTraffic.py b/src/test/utils/McastTraffic.py
index 2bd28e2..7ab79a7 100644
--- a/src/test/utils/McastTraffic.py
+++ b/src/test/utils/McastTraffic.py
@@ -6,16 +6,20 @@
 from scapy.all import *
 
 class McastTraffic(threading.Thread):
-
-    dst_mac = '01:00:5e:00:01:01'
-    src_mac = '02:88:b4:e4:90:77'
-    src_ip = '1.2.3.4'
+    DST_MAC_DEFAULT = '01:00:5e:00:01:01'
+    SRC_MAC_DEFAULT = '02:88:b4:e4:90:77'
+    SRC_IP_DEFAULT = '1.2.3.4'
     SEND_STATE = 1
     RECV_STATE = 2
-    def __init__(self, addrs, iface = 'eth0', cb = None, arg = None):
+
+    def __init__(self, addrs, iface = 'eth0', dst_mac = DST_MAC_DEFAULT, src_mac = SRC_MAC_DEFAULT,
+                 src_ip = SRC_IP_DEFAULT, cb = None, arg = None):
         threading.Thread.__init__(self)
         self.addrs = addrs
         self.iface = iface
+        self.dst_mac = dst_mac
+        self.src_mac = src_mac
+        self.src_ip = src_ip
         self.cb = cb
         self.arg = arg
         self.state = self.SEND_STATE | self.RECV_STATE
@@ -46,4 +50,3 @@
     def isSendStopped(self):
         return False if self.state & self.SEND_STATE else True
 
-    
diff --git a/src/test/utils/OltConfig.py b/src/test/utils/OltConfig.py
new file mode 100644
index 0000000..958e0bd
--- /dev/null
+++ b/src/test/utils/OltConfig.py
@@ -0,0 +1,50 @@
+import os
+import json
+##load the olt config
+
+class OltConfig:
+    def __init__(self, olt_conf_file = ''):
+        if not olt_conf_file:
+            self.olt_conf_file = os.getenv('OLT_CONFIG')
+        else:
+            self.olt_conf_file = olt_conf_file
+        try:
+            self.olt_handle = open(self.olt_conf_file, 'r')
+            self.olt_conf = json.load(self.olt_handle)
+            self.olt_conf['olt'] = True
+        except:
+            self.olt_handle = None
+            self.olt_conf = {}
+            self.olt_conf['olt'] = False
+            
+    def on_olt(self):
+        return self.olt_conf['olt'] is True
+
+    def olt_port_map(self):
+        if self.on_olt() and self.olt_conf.has_key('port_map'):
+            port_map = {}
+            port_map['ports'] = self.olt_conf['port_map']['ports']
+            port_map['start_vlan'] = 0
+            if self.olt_conf['port_map'].has_key('host'):
+                port_map['host'] = self.olt_conf['port_map']['host']
+            else:
+                port_map['host'] = 'ovsbr0'
+            if self.olt_conf['port_map'].has_key('start_vlan'):
+                port_map['start_vlan'] = int(self.olt_conf['port_map']['start_vlan'])
+                
+            ##Build a rx/tx port number to interface map
+            port_map[1] = self.olt_conf['port_map']['rx']
+            port_map[2] = self.olt_conf['port_map']['tx']
+            port_map[port_map[1]] = 1
+            port_map[port_map[2]] = 2
+            return port_map
+        else:
+            return None
+
+    def olt_device_data(self):
+        if self.on_olt():
+            accessDeviceDict = {}
+            accessDeviceDict['uplink'] = str(self.olt_conf['uplink'])
+            accessDeviceDict['vlan'] = str(self.olt_conf['vlan'])
+            return accessDeviceDict
+        return None
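+
+if __name__ == '__main__':
+    ##Minimal usage sketch: assumes a JSON file providing the 'port_map', 'uplink'
+    ##and 'vlan' keys consumed above; the file name below is illustrative only.
+    olt = OltConfig(olt_conf_file = 'olt_config.json')
+    print 'On OLT: %s' %olt.on_olt()
+    print 'Port map: %s' %olt.olt_port_map()
+    print 'Device data: %s' %olt.olt_device_data()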
diff --git a/src/test/utils/OnosCtrl.py b/src/test/utils/OnosCtrl.py
index 043438f..f41bd6a 100644
--- a/src/test/utils/OnosCtrl.py
+++ b/src/test/utils/OnosCtrl.py
@@ -3,21 +3,25 @@
 import os,sys,time
 
 class OnosCtrl:
-    
+
+    auth = ('karaf', 'karaf')
+    controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
+    cfg_url = 'http://%s:8181/onos/v1/network/configuration/' %(controller)
+    applications_url = 'http://%s:8181/onos/v1/applications' %(controller)
+
     def __init__(self, app, controller = None):
         self.app = app
-        if controller is None:
-            self.controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
-        else:
+        if controller is not None:
             self.controller = controller
         self.app_url = 'http://%s:8181/onos/v1/applications/%s' %(self.controller, self.app)
         self.cfg_url = 'http://%s:8181/onos/v1/network/configuration/' %(self.controller)
         self.auth = ('karaf', 'karaf')
 
-    def config(self, config):
+    @classmethod
+    def config(cls, config):
         if config:
             json_data = json.dumps(config)
-            resp = requests.post(self.cfg_url, auth = self.auth, data = json_data)
+            resp = requests.post(cls.cfg_url, auth = cls.auth, data = json_data)
             return resp.ok, resp.status_code
         return False, 400
 
@@ -29,4 +33,50 @@
         resp = requests.delete(self.app_url + '/active', auth = self.auth)
         return resp.ok, resp.status_code
 
+    @classmethod
+    def get_devices(cls):
+        url = 'http://%s:8181/onos/v1/devices' %(cls.controller)
+        result = requests.get(url, auth = cls.auth)
+        if result.ok:
+            devices = result.json()['devices']
+            return filter(lambda d: d['available'], devices)
 
+        return None
+
+    @classmethod
+    def get_flows(cls, device_id):
+        url = 'http://%s:8181/onos/v1/flows/' %(cls.controller) + device_id
+        result = requests.get(url, auth = cls.auth)
+        if result.ok:
+            return result.json()['flows']
+        return None
+
+    @classmethod
+    def cord_olt_config(cls, olt_device_data = None):
+        '''Configures OLT data for existing devices/switches'''
+        if olt_device_data is None:
+            return
+        did_dict = {}
+        config = { 'devices' : did_dict }
+        devices = cls.get_devices()
+        if not devices:
+            return
+        device_ids = map(lambda d: d['id'], devices)
+        for did in device_ids:
+            access_device_dict = {}
+            access_device_dict['accessDevice'] = olt_device_data
+            did_dict[did] = access_device_dict
+
+        ##configure the device list with access information
+        return cls.config(config)
+
+    @classmethod
+    def install_app(cls, app_file, onos_ip = None):
+        params = {'activate':'true'}
+        headers = {'content-type':'application/octet-stream'}
+        url = cls.applications_url if onos_ip is None else 'http://{0}:8181/onos/v1/applications'.format(onos_ip)
+        with open(app_file, 'rb') as payload:
+            result = requests.post(url, auth = cls.auth,
+                                   params = params, headers = headers,
+                                   data = payload)
+        return result.ok, result.status_code
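+
+if __name__ == '__main__':
+    ##Minimal usage sketch: assumes ONOS is reachable at ONOS_CONTROLLER_IP (or
+    ##localhost) with the default karaf credentials used above.
+    devices = OnosCtrl.get_devices()
+    print 'Available devices: %s' %devices
+    if devices:
+        print 'Flows on %s: %s' %(devices[0]['id'], OnosCtrl.get_flows(devices[0]['id']))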
diff --git a/src/test/utils/OnosFlowCtrl.py b/src/test/utils/OnosFlowCtrl.py
new file mode 100644
index 0000000..0aed3d1
--- /dev/null
+++ b/src/test/utils/OnosFlowCtrl.py
@@ -0,0 +1,216 @@
+import json
+import requests
+import os,sys,time
+from nose.tools import *
+from scapy.all import *
+from OnosCtrl import OnosCtrl
+import fcntl, socket, struct
+
+def get_mac(iface = 'ovsbr0', pad = 4):
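+    ##ioctl 0x8927 is SIOCGIFHWADDR: fetch the interface MAC and left pad it to
+    ##form an OpenFlow style device id (of:0000<mac>)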
+    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+    try:
+        info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', iface[:15]))
+    except:
+        info = ['0'] * 24
+    return '0'*pad + ''.join(['%02x' %ord(char) for char in info[18:24]])
+
+class OnosFlowCtrl:
+
+    auth = ('karaf', 'karaf')
+    controller = os.getenv('ONOS_CONTROLLER_IP') or 'localhost'
+    cfg_url = 'http://%s:8181/onos/v1/flows/' %(controller)
+    
+    def __init__( self,
+                  deviceId,
+                  appId=0,
+                  ingressPort="",
+                  egressPort="",
+                  ethType="",
+                  ethSrc="",
+                  ethDst="",
+                  vlan="",
+                  ipProto="",
+                  ipSrc=(),
+                  ipDst=(),
+                  tcpSrc="",
+                  tcpDst="",
+                  udpDst="",
+                  udpSrc="",
+                  mpls=""):
+        self.deviceId = deviceId
+        self.appId = appId
+        self.ingressPort = ingressPort
+        self.egressPort = egressPort
+        self.ethType = ethType
+        self.ethSrc = ethSrc
+        self.ethDst = ethDst
+        self.vlan = vlan
+        self.ipProto = ipProto
+        self.ipSrc = ipSrc
+        self.ipDst = ipDst
+        self.tcpSrc = tcpSrc
+        self.tcpDst = tcpDst
+        self.udpDst = udpDst
+        self.udpSrc = udpSrc
+        self.mpls = mpls
+
+    @classmethod
+    def get_flows(cls, device_id):
+        return OnosCtrl.get_flows(device_id)
+
+    def addFlow(self):
+        """
+        Description:
+            Creates a single flow in the specified device
+        Required:
+            * deviceId: id of the device
+        Optional:
+            * ingressPort: port of the ingress device
+            * egressPort: port of the egress device
+            * ethType: specify ethType
+            * ethSrc: specify ethSrc ( i.e. src mac addr )
+            * ethDst: specify ethDst ( i.e. dst mac addr )
+            * ipProto: specify ip protocol
+            * ipSrc: specify ip source address with mask eg. ip#/24
+                as a tuple (type, ip#)
+            * ipDst: specify ip destination address eg. ip#/24
+                as a tuple (type, ip#)
+            * tcpSrc: specify tcp source port
+            * tcpDst: specify tcp destination port
+        Returns:
+            True for successful requests;
+            False for failure/error on requests
+        """
+        flowJson = { "priority":100,
+                     "isPermanent":"true",
+                     "timeout":0,
+                     "deviceId":self.deviceId,
+                     "treatment":{"instructions":[]},
+                     "selector": {"criteria":[]}}
+        if self.appId:
+            flowJson[ "appId" ] = self.appId
+            
+        if self.egressPort:
+            flowJson[ 'treatment' ][ 'instructions' ].append( {
+                    "type":"OUTPUT",
+                    "port":self.egressPort } )
+        if self.ingressPort:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":"IN_PORT",
+                    "port":self.ingressPort } )
+        if self.ethType:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":"ETH_TYPE",
+                    "ethType":self.ethType } )
+        if self.ethSrc:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":"ETH_SRC",
+                    "mac":self.ethSrc } )
+        if self.ethDst:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":"ETH_DST",
+                    "mac":self.ethDst } )
+        if self.vlan:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":"VLAN_VID",
+                    "vlanId":self.vlan } )
+        if self.mpls:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":"MPLS_LABEL",
+                    "label":self.mpls } )
+        if self.ipSrc:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":self.ipSrc[0],
+                    "ip":self.ipSrc[1] } )
+        if self.ipDst:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":self.ipDst[0],
+                    "ip":self.ipDst[1] } )
+        if self.tcpSrc:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":"TCP_SRC",
+                    "tcpPort": self.tcpSrc } )
+        if self.tcpDst:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":"TCP_DST",
+                    "tcpPort": self.tcpDst } )
+        if self.udpSrc:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":"UDP_SRC",
+                    "udpPort": self.udpSrc } )
+        if self.udpDst:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":"UDP_DST",
+                    "udpPort": self.udpDst } )
+        if self.ipProto:
+            flowJson[ 'selector' ][ 'criteria' ].append( {
+                    "type":"IP_PROTO",
+                    "protocol": self.ipProto } )
+
+        return self.sendFlow( deviceId=self.deviceId, flowJson=flowJson)
+
+    def removeFlow(self, deviceId, flowId):
+        """
+        Description:
+            Remove specific device flow
+        Required:
+            str deviceId - id of the device
+            str flowId - id of the flow
+        Return:
+            Returns True if successfully deletes flows, otherwise False
+        """
+        # NOTE: REST url requires the flow id to be in decimal form
+        query = self.cfg_url + str( deviceId ) + '/' + str( int( flowId ) )
+        response = requests.delete(query, auth = self.auth)
+        if response:
+            if 200 <= response.status_code <= 299:
+                return True
+            else:
+                return False
+
+        return False
+
+    def findFlow(self, deviceId, **criterias):
+        flows = self.get_flows(deviceId)
+        match_keys = criterias.keys()
+        matches = len(match_keys)
+        num_matched = 0
+        for f in flows:
+            criteria = f['selector']['criteria']
+            for c in criteria:
+                if c['type'] not in match_keys:
+                    continue
+                match_key, match_val = criterias.get(c['type'])
+                val = c[match_key]
+                if val == match_val:
+                    num_matched += 1
+                if num_matched == matches:    
+                    return f['id']
+        return None
+                    
+    def sendFlow(self, deviceId, flowJson):
+        """
+        Description:
+            Sends a single flow to the specified device. This function exists
+            so you can bypass the addFlow driver and send your own custom flow.
+        Required:
+            * The flow in json
+            * the device id to add the flow to
+        Returns:
+            True for successful requests
+            False for error on requests;
+        """
+        url = self.cfg_url + str(deviceId)
+        response = requests.post(url, auth = self.auth, data = json.dumps(flowJson) )
+        if response.ok:
+            if response.status_code in [200, 201]:
+                log.info('Successfully POSTED flow for device %s' %str(deviceId))
+                return True
+            else:
+                log.info('Post flow for device %s failed with status %d' %(str(deviceId), 
+                                                                           response.status_code))
+                return False
+        else:
+            log.error('Flow post request returned with status %d' %response.status_code)
+
+        return False
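+
+if __name__ == '__main__':
+    ##Minimal usage sketch: assumes a reachable ONOS controller; the device id,
+    ##ports and ether type below are illustrative only.
+    flow = OnosFlowCtrl(deviceId = 'of:0000000000000001',
+                        ingressPort = 1,
+                        egressPort = 2,
+                        ethType = '0x800')
+    print 'Flow add succeeded: %s' %flow.addFlow()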
diff --git a/src/test/utils/Stats.py b/src/test/utils/Stats.py
index 3d8e40c..ef3707d 100644
--- a/src/test/utils/Stats.py
+++ b/src/test/utils/Stats.py
@@ -9,9 +9,10 @@
             self.max = 0
             self.delta_squares = 0
 
-      def update(self, packets = 0, t = 0):
+      def update(self, packets = 0, t = 0, usecs = False):
             self.count += packets
-            t *= 1000000 ##convert to usecs
+            if usecs == False:
+                  t *= 1000000 ##convert to usecs
             if self.start == 0:
                   self.start = t
             self.delta += t
diff --git a/src/test/utils/__init__.py b/src/test/utils/__init__.py
new file mode 100644
index 0000000..6bbc949
--- /dev/null
+++ b/src/test/utils/__init__.py
@@ -0,0 +1,5 @@
+import os,sys
+##add the python path to lookup the utils
+working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
+utils_dir = os.path.join(working_dir, '../fsm')
+__path__.append(utils_dir)
diff --git a/src/test/utils/threadPool.py b/src/test/utils/threadPool.py
new file mode 100644
index 0000000..bd3e21e
--- /dev/null
+++ b/src/test/utils/threadPool.py
@@ -0,0 +1,80 @@
+import threading
+import Queue
+
+class PoolThread(threading.Thread):
+
+    def __init__(self, requests_queue, wait_timeout, daemon, **kwds):
+        threading.Thread.__init__(self, **kwds)
+        self.daemon = daemon
+        self._queue = requests_queue
+        self._wait_timeout = wait_timeout
+        self._finished = threading.Event()
+        self.start()
+
+    def run(self):
+        while True:
+            if(self._finished.isSet()):
+                break
+
+            try:
+                work = self._queue.get(block=True, timeout=self._wait_timeout)
+            except Queue.Empty:
+                continue
+            else:
+                try:
+                    work.__call__()
+                finally:
+                    self._queue.task_done()
+
+
+
+class ThreadPool:
+
+    def __init__(self, pool_size, daemon=False, queue_size=0, wait_timeout=5):
+        """Set up the thread pool and create pool_size threads
+        """
+        self._queue = Queue.Queue(queue_size)
+        self._daemon = daemon
+        self._threads = []
+        self._pool_size = pool_size
+        self._wait_timeout = wait_timeout
+        self.createThreads()
+
+
+    def addTask(self, callableObject):
+        if (callable(callableObject)):
+            self._queue.put(callableObject, block=True)
+
+    def cleanUpThreads(self):
+        self._queue.join()
+
+        for t in self._threads:
+            t._finished.set()
+
+
+    def createThreads(self):
+        for i in range(self._pool_size):
+            self._threads.append(PoolThread(self._queue, self._wait_timeout, self._daemon))
+
+
+class CallObject:
+    def __init__(self, v = 0): 
+        self.v = v
+    def callCb(self): 
+        print 'Inside callback for %d' %self.v
+
+if __name__ == '__main__':
+    import multiprocessing
+    callList = []
+    cpu_count = multiprocessing.cpu_count()
+    for i in xrange(cpu_count * 2):
+        callList.append(CallObject(i))
+    tp = ThreadPool(cpu_count * 2, queue_size=1, wait_timeout=1)
+    for i in range(40):
+        callObject = callList[i% (cpu_count*2)]
+        f = callObject.callCb
+        tp.addTask(f)
+
+    tp.cleanUpThreads()
+
+
diff --git a/src/test/vrouter/__init__.py b/src/test/vrouter/__init__.py
new file mode 100644
index 0000000..a42b024
--- /dev/null
+++ b/src/test/vrouter/__init__.py
@@ -0,0 +1,11 @@
+import os,sys
+##add the python path to lookup the utils
+working_dir = os.path.dirname(os.path.realpath(sys.argv[-1]))
+utils_dir = os.path.join(working_dir, '../utils')
+fsm_dir = os.path.join(working_dir, '../fsm')
+cli_dir = os.path.join(working_dir, '../cli')
+subscriber_dir = os.path.join(working_dir, '../subscriber')
+__path__.append(utils_dir)
+__path__.append(fsm_dir)
+__path__.append(cli_dir)
+__path__.append(subscriber_dir)
diff --git a/src/test/vrouter/vrouterTest.py b/src/test/vrouter/vrouterTest.py
new file mode 100644
index 0000000..ab7a12f
--- /dev/null
+++ b/src/test/vrouter/vrouterTest.py
@@ -0,0 +1,442 @@
+import unittest
+from nose.tools import *
+from scapy.all import *
+from OnosCtrl import OnosCtrl
+from OltConfig import OltConfig
+from OnosFlowCtrl import OnosFlowCtrl, get_mac
+from onosclidriver import OnosCliDriver
+from CordContainer import Container, Onos, Quagga
+from CordTestServer import cord_test_onos_restart, cord_test_quagga_restart
+from portmaps import g_subscriber_port_map
+import threading
+import time
+import os
+import json
+log.setLevel('INFO')
+
+class QuaggaStopWrapper(Container):
+
+    def __init__(self, name = 'cord-quagga', image = 'cord-test/quagga', tag = 'latest'):
+        super(QuaggaStopWrapper, self).__init__(name, image, tag = tag)
+        if self.exists():
+            self.kill()
+
+class vrouter_exchange(unittest.TestCase):
+
+    apps = ('org.onosproject.vrouter', 'org.onosproject.fwd')
+    device_id = 'of:' + get_mac('ovsbr0')
+    vrouter_device_dict = { "devices" : {
+                "{}".format(device_id) : {
+                    "basic" : {
+                        "driver" : "softrouter"
+                    }
+                }
+             },
+          }
+    zebra_conf = '''
+password zebra
+log stdout
+service advanced-vty
+!
+!debug zebra rib
+!debug zebra kernel
+!debug zebra fpm
+!
+!interface eth1
+! ip address 10.10.0.3/16
+line vty
+ exec-timeout 0 0
+'''
+    test_path = os.path.dirname(os.path.realpath(__file__))
+    quagga_config_path = os.path.join(test_path, '..', 'setup/quagga-config')
+    onos_config_path = os.path.join(test_path, '..', 'setup/onos-config')
+    GATEWAY = '192.168.10.50'
+    INGRESS_PORT = 1
+    EGRESS_PORT = 2
+    MAX_PORTS = 100
+    peer_list = [ ('192.168.10.1', '00:00:00:00:00:01'), ('192.168.11.1', '00:00:00:00:02:01'), ]
+    network_list = []
+
+    @classmethod
+    def setUpClass(cls):
+        ''' Activate the vrouter apps'''
+        cls.olt = OltConfig()
+        cls.port_map = cls.olt.olt_port_map()
+        if not cls.port_map:
+            cls.port_map = g_subscriber_port_map
+        #cls.vrouter_host_load(host = cls.GATEWAY)
+        time.sleep(3)
+        
+    @classmethod
+    def tearDownClass(cls):
+        '''Deactivate the vrouter apps'''
+        #cls.vrouter_host_unload()
+
+    def cliEnter(self):
+        retries = 0
+        while retries < 3:
+            self.cli = OnosCliDriver(connect = True)
+            if self.cli.handle:
+                break
+            else:
+                retries += 1
+                time.sleep(2)
+
+    def cliExit(self):
+        self.cli.disconnect()
+
+    @classmethod
+    def onos_load_config(cls, config):
+        status, code = OnosCtrl.config(config)
+        if status is False:
+            log.info('JSON request returned status %d' %code)
+            assert_equal(status, True)
+
+    @classmethod
+    def vrouter_config_get(cls, networks = 4, peers = 1):
+        vrouter_configs = cls.generate_vrouter_conf(networks = networks, peers = peers)
+        return vrouter_configs
+        ##ONOS router does not support dynamic reconfigurations
+        #for config in vrouter_configs:
+        #    cls.onos_load_config(config)
+        #    time.sleep(5)
+
+    @classmethod
+    def vrouter_host_load(cls):
+        index = 1
+        for host,_ in cls.peer_list:
+            iface = cls.port_map[index]
+            index += 1
+            config_cmds = ( 'ifconfig {0} {1}'.format(iface, host),
+                            'arping -I {0} {1} -c 2'.format(iface, host),
+                            )
+            for cmd in config_cmds:
+                os.system(cmd)
+
+    @classmethod
+    def vrouter_host_unload(cls):
+        index = 1
+        for host,_ in cls.peer_list:
+            iface = cls.port_map[index]
+            index += 1
+            config_cmds = ('ifconfig {} 0'.format(iface), )
+            for cmd in config_cmds:
+                os.system(cmd)
+
+    @classmethod
+    def start_onos(cls, network_cfg = None):
+        if type(network_cfg) is tuple:
+            res = []
+            for v in network_cfg:
+                res += v.items()
+            config = dict(res)
+        else:
+            config = network_cfg
+        log.info('Restarting ONOS with new network configuration')
+        cfg = json.dumps(config)
+        with open('{}/network-cfg.json'.format(cls.onos_config_path), 'w') as f:
+            f.write(cfg)
+
+        return cord_test_onos_restart()
+
+    @classmethod
+    def start_quagga(cls, networks = 4):
+        log.info('Restarting Quagga container with configuration for %d networks' %(networks))
+        config = cls.generate_conf(networks = networks)
+        host_config_file = '{}/testrib_gen.conf'.format(Quagga.host_quagga_config)
+        guest_config_file = os.path.join(Quagga.guest_quagga_config, 'testrib_gen.conf')
+        with open(host_config_file, 'w') as f:
+            f.write(config)
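+        ##Give Quagga progressively more boot time as the number of advertised networks grows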
+        if networks <= 10000:
+            boot_delay = 25
+        else:
+            delay_map = [60, 100, 150, 200, 300, 450, 600, 800, 1000, 1200]
+            n = min(networks/100000, len(delay_map)-1)
+            boot_delay = delay_map[n]
+        cord_test_quagga_restart(config_file = guest_config_file, boot_delay = boot_delay)
+
+    @classmethod
+    def zgenerate_vrouter_conf(cls, networks = 4):
+        num = 0
+        start_network = ( 11 << 24) | ( 0 << 16) | ( 0 << 8) | 0
+        end_network =   ( 200 << 24 ) | ( 0 << 16)  | (0 << 8) | 0
+        ports_dict = { 'ports' : {} }
+        interface_list = []
+        for n in xrange(start_network, end_network):
+            if n & 255 == 0:
+                port_map = ports_dict['ports']
+                port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
+                device_port_key = '{0}/{1}'.format(cls.device_id, port)
+                try:
+                    interfaces = port_map[device_port_key]['interfaces']
+                except:
+                    port_map[device_port_key] = { 'interfaces' : [] }
+                    interfaces = port_map[device_port_key]['interfaces']
+                    
+                ips = '%d.%d.%d.2/24'%( (n >> 24) & 0xff, ( ( n >> 16) & 0xff ), ( (n >> 8 ) & 0xff ) )
+                if num < cls.MAX_PORTS - 1:
+                    interface_dict = { 'name' : 'b1-{}'.format(port), 'ips': [ips], 'mac' : '00:00:00:00:00:01' }
+                    interfaces.append(interface_dict)
+                    interface_list.append(interface_dict['name'])
+                else:
+                    interfaces[0]['ips'].append(ips)
+                num += 1
+                if num == networks:
+                    break
+        quagga_dict = { 'apps': { 'org.onosproject.router' : { 'router' : {} } } }
+        quagga_router_dict = quagga_dict['apps']['org.onosproject.router']['router']
+        quagga_router_dict['ospfEnabled'] = True
+        quagga_router_dict['interfaces'] = interface_list
+        quagga_router_dict['controlPlaneConnectPoint'] = '{0}/{1}'.format(cls.device_id, 
+                                                                          networks + 1 if networks < cls.MAX_PORTS else cls.MAX_PORTS )
+        return (cls.vrouter_device_dict, ports_dict, quagga_dict)
+
+    @classmethod
+    def generate_vrouter_conf(cls, networks = 4, peers = 1):
+        num = 0
+        start_peer = ( 192 << 24) | ( 168 << 16)  |  (10 << 8) | 0
+        end_peer =   ( 200 << 24 ) | (168 << 16)  |  (10 << 8) | 0
+        local_network = end_peer + 1
+        ports_dict = { 'ports' : {} }
+        interface_list = []
+        peer_list = []
+        for n in xrange(start_peer, end_peer, 256):
+            port_map = ports_dict['ports']
+            port = num + 1 if num < cls.MAX_PORTS - 1 else cls.MAX_PORTS - 1
+            device_port_key = '{0}/{1}'.format(cls.device_id, port)
+            try:
+                interfaces = port_map[device_port_key]['interfaces']
+            except:
+                port_map[device_port_key] = { 'interfaces' : [] }
+                interfaces = port_map[device_port_key]['interfaces']
+            ip = n + 2
+            peer_ip = n + 1
+            ips = '%d.%d.%d.%d/24'%( (ip >> 24) & 0xff, ( (ip >> 16) & 0xff ), ( (ip >> 8 ) & 0xff ), ip & 0xff)
+            peer = '%d.%d.%d.%d' % ( (peer_ip >> 24) & 0xff, ( ( peer_ip >> 16) & 0xff ), ( (peer_ip >> 8 ) & 0xff ), peer_ip & 0xff )
+            mac = RandMAC()._fix()
+            peer_list.append((peer, mac))
+            if num < cls.MAX_PORTS - 1:
+                interface_dict = { 'name' : 'b1-{}'.format(port), 'ips': [ips], 'mac' : mac }
+                interfaces.append(interface_dict)
+                interface_list.append(interface_dict['name'])
+            else:
+                interfaces[0]['ips'].append(ips)
+            num += 1
+            if num == peers:
+                break
+        quagga_dict = { 'apps': { 'org.onosproject.router' : { 'router' : {}, 'bgp' : { 'bgpSpeakers' : [] } } } }
+        quagga_router_dict = quagga_dict['apps']['org.onosproject.router']['router']
+        quagga_router_dict['ospfEnabled'] = True
+        quagga_router_dict['interfaces'] = interface_list
+        quagga_router_dict['controlPlaneConnectPoint'] = '{0}/{1}'.format(cls.device_id, peers + 1)
+
+        #bgp_speaker_dict = { 'apps': { 'org.onosproject.router' : { 'bgp' : { 'bgpSpeakers' : [] } } } }
+        bgp_speakers_list = quagga_dict['apps']['org.onosproject.router']['bgp']['bgpSpeakers']
+        speaker_dict = {}
+        speaker_dict['name'] = 'bgp{}'.format(peers+1)
+        speaker_dict['connectPoint'] = '{0}/{1}'.format(cls.device_id, peers + 1)
+        speaker_dict['peers'] = peer_list
+        bgp_speakers_list.append(speaker_dict)
+        cls.peer_list = peer_list
+        return (cls.vrouter_device_dict, ports_dict, quagga_dict)
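+    ##For peers = 1, generate_vrouter_conf above yields a ports entry roughly like
+    ##(illustrative only; the MAC is random):
+    ##  { 'ports': { '<device_id>/1': { 'interfaces': [
+    ##        { 'name': 'b1-1', 'ips': ['192.168.10.2/24'], 'mac': '<random mac>' } ] } } }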
+
+    @classmethod
+    def generate_conf(cls, networks = 4):
+        num = 0
+        start_network = ( 11 << 24) | ( 10 << 16) | ( 10 << 8) | 0
+        end_network =   ( 172 << 24 ) | ( 0 << 16)  | (0 << 8) | 0
+        net_list = []
+        peer_list = cls.peer_list
+        network_list = []
+        for n in xrange(start_network, end_network, 256):
+            net = '%d.%d.%d.0'%( (n >> 24) & 0xff, ( ( n >> 16) & 0xff ), ( (n >> 8 ) & 0xff ) )
+            network_list.append(net)
+            gateway = peer_list[num % len(peer_list)][0]
+            net_route = 'ip route {0}/24 {1}'.format(net, gateway)
+            net_list.append(net_route)
+            num += 1
+            if num == networks:
+                break
+        cls.network_list = network_list
+        zebra_routes = '\n'.join(net_list)
+        #log.info('Zebra routes: \n:%s\n' %cls.zebra_conf + zebra_routes)
+        return cls.zebra_conf + zebra_routes
+    
+    @classmethod
+    def vrouter_activate(cls, deactivate = False):
+        app = 'org.onosproject.vrouter'
+        onos_ctrl = OnosCtrl(app)
+        if deactivate is True:
+            onos_ctrl.deactivate()
+        else:
+            onos_ctrl.activate()
+        time.sleep(3)
+
+    @classmethod
+    def vrouter_configure(cls, networks = 4, peers = 1):
+        ##Deactivate vrouter
+        vrouter_configs = cls.vrouter_config_get(networks = networks, peers = peers)
+        cls.start_onos(network_cfg = vrouter_configs)
+        cls.vrouter_host_load()
+        ##Start quagga
+        cls.start_quagga(networks = networks)
+        return vrouter_configs
+    
+    def vrouter_port_send_recv(self, ingress, egress, dst_mac, dst_ip, positive_test = True):
+        src_mac = '00:00:00:00:00:02'
+        src_ip = '1.1.1.1'
+        self.success = False if positive_test else True
+        timeout = 10 if positive_test else 1
+        count = 2 if positive_test else 1
+        self.start_sending = True
+        def recv_task():
+            def recv_cb(pkt):
+                log.info('Pkt seen with ingress ip %s, egress ip %s' %(pkt[IP].src, pkt[IP].dst))
+                self.success = True if positive_test else False
+            sniff(count=count, timeout=timeout,
+                  lfilter = lambda p: IP in p and p[IP].dst == dst_ip and p[IP].src == src_ip,
+                  prn = recv_cb, iface = self.port_map[ingress])
+            self.start_sending = False
+
+        t = threading.Thread(target = recv_task)
+        t.start()
+        L2 = Ether(src = src_mac, dst = dst_mac)
+        L3 = IP(src = src_ip, dst = dst_ip)
+        pkt = L2/L3
+        log.info('Sending a packet with dst ip %s, dst mac %s on port %s to verify if flows are correct' %
+                 (dst_ip, dst_mac, self.port_map[egress]))
+        while self.start_sending is True:
+            sendp(pkt, count=50, iface = self.port_map[egress])
+        t.join()
+        assert_equal(self.success, True)
+
+    def vrouter_traffic_verify(self, positive_test = True):
+        peers = len(self.peer_list)
+        egress = peers + 1
+        num = 0
+        num_hosts = 5 if positive_test else 1
+        for network in self.network_list:
+            num_ips = num_hosts
+            octets = network.split('.')
+            for i in xrange(num_ips):
+                octets[-1] = str(int(octets[-1]) + 1)
+                dst_ip = '.'.join(octets)
+                dst_mac = self.peer_list[ num % peers ] [1]
+                port = (num % peers)
+                ingress = port + 1
+                #Since peers are on the same network
+                ##Verify if the flows are set up by sending traffic across
+                self.vrouter_port_send_recv(ingress, egress, dst_mac, dst_ip, positive_test = positive_test)
+            num += 1
+    
+    def __vrouter_network_verify(self, networks, peers = 1, positive_test = True):
+        _, ports_map, egress_map = self.vrouter_configure(networks = networks, peers = peers)
+        self.cliEnter()
+        ##Now verify
+        hosts = json.loads(self.cli.hosts(jsonFormat = True))
+        log.info('Discovered hosts: %s' %hosts)
+        ##We read routes from the cli only for smaller route counts to avoid cli timeouts
+        if networks <= 10000:
+            routes = json.loads(self.cli.routes(jsonFormat = True))
+            #log.info('Routes: %s' %routes)
+            assert_equal(len(routes['routes4']), networks)
+            flows = json.loads(self.cli.flows(jsonFormat = True))
+            flows = filter(lambda f: f['flows'], flows)
+            #log.info('Flows: %s' %flows)
+            assert_not_equal(len(flows), 0)
+        self.vrouter_traffic_verify()
+        if positive_test is False:
+            self.__vrouter_network_verify_negative(networks, peers = peers)
+        self.cliExit()
+        self.vrouter_host_unload()
+        return True
+
+    def __vrouter_network_verify_negative(self, networks, peers = 1):
+        ##Stop quagga. Test traffic again to see if flows were removed
+        log.info('Stopping Quagga container')
+        quaggaStop = QuaggaStopWrapper()
+        time.sleep(2)
+        if networks <= 10000:
+            routes = json.loads(self.cli.routes(jsonFormat = True))
+            #Verify routes have been removed
+            if routes and routes.has_key('routes4'):
+                assert_equal(len(routes['routes4']), 0)
+        self.vrouter_traffic_verify(positive_test = False)
+        log.info('OVS flows have been removed successfully after Quagga was stopped')
+        self.start_quagga(networks = networks)
+        ##Verify the flows again after restarting quagga back
+        if networks <= 10000:
+            routes = json.loads(self.cli.routes(jsonFormat = True))
+            assert_equal(len(routes['routes4']), networks)
+        self.vrouter_traffic_verify()
+        log.info('OVS flows have been successfully reinstalled after Quagga was restarted')
+
+    def test_vrouter_1(self):
+        '''Test vrouter with 5 routes'''
+        res = self.__vrouter_network_verify(5, peers = 1)
+        assert_equal(res, True)
+
+    def test_vrouter_2(self):
+        '''Test vrouter with 5 routes with 2 peers'''
+        res = self.__vrouter_network_verify(5, peers = 2)
+        assert_equal(res, True)
+
+    def test_vrouter_3(self):
+        '''Test vrouter with 6 routes with 3 peers'''
+        res = self.__vrouter_network_verify(6, peers = 3)
+        assert_equal(res, True)
+
+    def test_vrouter_4(self):
+        '''Test vrouter with 50 routes'''
+        res = self.__vrouter_network_verify(50, peers = 1)
+        assert_equal(res, True)
+
+    def test_vrouter_5(self):
+        '''Test vrouter with 50 routes and 5 peers'''
+        res = self.__vrouter_network_verify(50, peers = 5)
+        assert_equal(res, True)
+
+    def test_vrouter_6(self):
+        '''Test vrouter with 100 routes'''
+        res = self.__vrouter_network_verify(100, peers = 1)
+        assert_equal(res, True)
+
+    def test_vrouter_7(self):
+        '''Test vrouter with 100 routes and 10 peers'''
+        res = self.__vrouter_network_verify(100, peers = 10)
+        assert_equal(res, True)
+
+    def test_vrouter_8(self):
+        '''Test vrouter with 300 routes'''
+        res = self.__vrouter_network_verify(300, peers = 1)
+        assert_equal(res, True)
+
+    def test_vrouter_9(self):
+        '''Test vrouter with 1000 routes'''
+        res = self.__vrouter_network_verify(1000, peers = 1)
+        assert_equal(res, True)
+    
+    def test_vrouter_10(self):
+        '''Test vrouter with 10000 routes'''
+        res = self.__vrouter_network_verify(10000, peers = 1)
+        assert_equal(res, True)
+    
+    @nottest
+    def test_vrouter_11(self):
+        '''Test vrouter with 100000 routes'''
+        res = self.__vrouter_network_verify(100000, peers = 1)
+        assert_equal(res, True)
+
+    @nottest
+    def test_vrouter_12(self):
+        '''Test vrouter with 1000000 routes'''
+        res = self.__vrouter_network_verify(1000000, peers = 1)
+        assert_equal(res, True)
+
+    def test_vrouter_13(self):
+        '''Test vrouter by installing 5 routes, stopping Quagga and restarting it'''
+        res = self.__vrouter_network_verify(5, peers = 1, positive_test = False)
+
+    def test_vrouter_14(self):
+        '''Test vrouter by installing 50 routes, stopping Quagga and restarting it'''
+        res = self.__vrouter_network_verify(50, peers = 1, positive_test = False)