Revert "Merge pull request #19 from rlane/move-oftest"

This reverts commit 130b6a33a9858351886668f7ef07be121cc6a0e1, reversing
changes made to 5d2a768c671a549473f68131a0fc11737bf43952.

Moving testutils.py broke some imports.
diff --git a/tests/oft b/tests/oft
deleted file mode 120000
index 0b189a0..0000000
--- a/tests/oft
+++ /dev/null
@@ -1 +0,0 @@
-../oft
\ No newline at end of file
diff --git a/tests/oft b/tests/oft
new file mode 100755
index 0000000..091193e
--- /dev/null
+++ b/tests/oft
@@ -0,0 +1,580 @@
+#!/usr/bin/env python
+"""
+@package oft
+
+OpenFlow test framework top level script
+
+This script is the entry point for running OpenFlow tests
+using the OFT framework.
+
+The global configuration is passed around in a dictionary
+generally called config.  The keys have the following
+significance.
+
+<pre>
+    platform          : String identifying the target platform
+    controller_host   : Host on which test controller is running (for sockets)
+    controller_port   : Port on which test controller listens for switch cxn
+    test_dir          : (TBD) Directory to search for test files (default .)
+    test_spec         : (TBD) Specification of test(s) to run
+    log_file          : Filename for test logging
+    list              : Boolean:  List all tests and exit
+    debug             : String giving debug level (info, warning, error...)
+</pre>
+
+See config_defaults below for the default values.
+
+The following are stored in the config dictionary, but are not currently
+configurable through the command line.
+
+<pre>
+    dbg_level         : logging module value of debug level
+    port_map          : Map of dataplane OpenFlow port to OS interface names
+    test_mod_map      : Dictionary indexed by module names and whose value
+                        is the module reference
+    all_tests         : Dictionary indexed by module reference and whose
+                        value is a list of functions in that module
+</pre>
+
+Each test may be assigned a priority by setting test_prio["TestName"] in 
+the respective module.  For now, the only use of this is to avoid 
+automatic inclusion of tests into the default list.  This is done by
+setting the test_prio value less than 0.  Eventually we may add ordering
+of test execution by test priority.
+
+To add a test to the system, either: edit an existing test case file (like
+basic.py) to add a test class which inherits from unittest.TestCase (directly
+or indirectly); or add a new file which includes a function definition 
+test_set_init(config).  Preferably the file is in the same directory as existing
+tests, though you can specify the directory on the command line.  The file
+should not be called "all" as that's reserved for the test-spec.
+
+If you add a new file, the test_set_init function should record the port
+map object from the configuration along with whatever other configuration 
+information it may need.
+
+TBD:  To add configuration to the system, first add an entry to config_default
+below.  If you want this to be a command line parameter, edit config_setup
+to add the option and default value to the parser.  Then edit config_get
+to make sure the option value gets copied into the configuration 
+structure (which then gets passed to everyone else).
+
+By convention, oft attempts to import the contents of a file by the 
+name of $platform.py into the local namespace.  
+
+IMPORTANT: That file should define a function platform_config_update which
+takes a configuration dictionary as an argument and updates it for the
+current run.  In particular, it should set up config["port_map"] with
+the proper map from OF port numbers to OF interface names.
+
+You can add your own platform, say gp104, by adding a file gp104.py to the
+platforms directory that defines the function platform_config_update and then
+use the parameter --platform=gp104 on the command line. You can also use the
+--platform-dir option to change which directory is searched.
+
+The current model for test sets is basic.py.  The current convention is
+that the test set should implement a function test_set_init which takes
+an oft configuration dictionary and returns a unittest.TestSuite object.
+Future test sets should do the same thing.
+
+Default setup:
+
+The default setup runs locally using veth pairs.  To exercise this, 
+checkout and build an openflow userspace datapath.  Then start it on 
+the local host:
+<pre>
+  sudo ~/openflow/regress/bin/veth_setup.pl 
+  sudo ofdatapath -i veth0,veth2,veth4,veth6 punix:/tmp/ofd &
+  sudo ofprotocol unix:/tmp/ofd tcp:127.0.0.1 --fail=closed --max-backoff=1 &
+
+Next, run oft: 
+  sudo ./oft --debug=info
+</pre>
+
+Examine oft.log if things don't work.
+
+@todo Support per-component debug levels (esp controller vs dataplane)
+@todo Consider moving oft up a level
+
+Current test case setup:
+    Files in the tests directory that contain a function test_set_init are
+considered test files.
+    The function test_set_init examines the test_spec config variable
+and generates a suite of tests.
+    Support a command line option --test_mod so that all tests in that
+module will be run.
+    Support all to specify all tests from the module.
+
+"""
+
+import sys
+from optparse import OptionParser
+from subprocess import Popen,PIPE
+import logging
+import unittest
+import time
+import os
+import imp
+import random
+
+pydir = os.path.join(os.path.dirname(__file__), '..', 'src', 'python')
+if os.path.exists(os.path.join(pydir, 'oftest')):
+    # Running from source tree
+    sys.path.insert(0, pydir)
+
+import testutils
+import oftest.ofutils
+
+try:
+    import scapy.all as scapy
+except:
+    try:
+        import scapy as scapy
+    except:
+        sys.exit("Need to install scapy for packet parsing")
+
##@var profile_mod
# Reference to the imported profile module; set by load_profile()
profile_mod = None

##@var DEBUG_LEVELS
# Map from strings to debugging levels
DEBUG_LEVELS = {
    'debug'              : logging.DEBUG,
    'verbose'            : logging.DEBUG,
    'info'               : logging.INFO,
    'warning'            : logging.WARNING,
    'warn'               : logging.WARNING,
    'error'              : logging.ERROR,
    'critical'           : logging.CRITICAL
}

# Default debug level name and its logging-module value
_debug_default = "warning"
_debug_level_default = DEBUG_LEVELS[_debug_default]

# Root of the source tree, relative to this script
root_dir = os.path.join(os.path.dirname(__file__), "..")

##@var config_default
# The default configuration dictionary for OFT; see the module docstring
# for the meaning of each key
config_default = {
    "param"              : None,
    "platform"           : "local",
    "platform_args"      : None,
    "controller_host"    : "0.0.0.0",
    "controller_port"    : 6633,
    "relax"              : False,
    "test_spec"          : "all",
    "test_dir"           : os.path.dirname(__file__),
    "log_file"           : "oft.log",
    "list"               : False,
    "list_test_names"    : False,
    "debug"              : _debug_default,
    "dbg_level"          : _debug_level_default,
    "port_map"           : {},
    "test_params"        : "None",
    "profile"            : "default",
    "allow_user"         : False,
    "fail_skipped"       : False,
    "default_timeout"    : 2,
    "minsize"            : 0,
    "random_seed"        : None,
    "platform_dir"       : os.path.join(root_dir, "platforms"),
    "profile_dir"        : os.path.join(root_dir, "profiles"),
}

# Default test priority; tests with priority < 0 are not run by default
TEST_PRIO_DEFAULT=100
TEST_PRIO_SKIP=-1

#@todo Set up a dict of config params so easier to manage:
# <param> <cmdline flags> <default value> <help> <optional parser>
+
+# Map options to config structure
# Map options to config structure
def config_get(opts):
    """
    Convert an optparse options object to an OFT configuration dictionary

    @param opts The parsed options object from optparse
    @return A copy of config_default with each key overridden by the
    corresponding command line option value
    """
    cfg = config_default.copy()
    for key in cfg.keys():
        # getattr replaces the original eval("opts." + key); same result,
        # no string-built code execution
        cfg[key] = getattr(opts, key)

    # Special case checks
    if opts.debug not in DEBUG_LEVELS.keys():
        print("Warning:  Bad value specified for debug level; using default")
        opts.debug = _debug_default
    if opts.verbose:
        cfg["debug"] = "verbose"
    cfg["dbg_level"] = DEBUG_LEVELS[cfg["debug"]]

    return cfg
+
def config_setup(cfg_dflt):
    """
    Set up the configuration including parsing the arguments

    @param cfg_dflt The default configuration dictionary
    @return A pair (config, args) where config is an config
    object and args is any additional arguments from the command line
    """

    parser = OptionParser(version="%prog 0.1")

    #@todo parse port map as option?
    # Set up default values; keyword expansion replaces the original
    # per-key eval("parser.set_defaults(...)") string building
    parser.set_defaults(**cfg_dflt)

    #@todo Add options via dictionary
    plat_help = """Set the platform type.  Valid values include:
        local:  User space virtual ethernet pair setup
        remote:  Remote embedded Broadcom based switch
        Create a new_plat.py file and use --platform=new_plat on the command line
        """
    parser.add_option("-a", "--platform-args", help="Custom arguments per platform.")
    parser.add_option("-P", "--platform", help=plat_help)
    parser.add_option("-H", "--host", dest="controller_host",
                      help="The IP/name of the test controller host")
    parser.add_option("-p", "--port", dest="controller_port",
                      type="int", help="Port number of the test controller")
    test_list_help = """Indicate tests to run.  Valid entries are "all" (the
        default) or a comma separated list of:
        module            Run all tests in the named module
        testcase          Run tests in all modules with the name testcase
        module.testcase   Run the specific test case
        """
    parser.add_option("-T", "--test-spec", "--test-list", help=test_list_help)
    parser.add_option("--log-file",
                      help="Name of log file, empty string to log to console")
    parser.add_option("--debug",
                      help="Debug lvl: debug, info, warning, error, critical")
    parser.add_option("--port-count", type="int",
                      help="Number of ports to use (optional)")
    parser.add_option("--base-of-port", type="int",
                      help="Base OpenFlow port number (optional)")
    parser.add_option("--base-if-index", type="int",
                      help="Base interface index number (optional)")
    parser.add_option("--list-test-names", action='store_true',
                      help="List only test names.", default=False)
    parser.add_option("--list", action="store_true",
                      help="List all tests and exit")
    parser.add_option("--verbose", action="store_true",
                      help="Short cut for --debug=verbose")
    parser.add_option("--relax", action="store_true",
                      help="Relax packet match checks allowing other packets")
    parser.add_option("--param", type="int",
                      help="Parameter sent to test (for debugging)")
    parser.add_option("--profile",
                      help="File listing tests to skip/run")
    parser.add_option("-t", "--test-params",
                      help="""Set test parameters: key=val;...
        NOTE:  key MUST be a valid Python identifier, egr_count not egr-count
        See --list""")
    parser.add_option("--allow-user", action="store_true",
                      help="Proceed even if oftest is not run as root")
    parser.add_option("--fail-skipped", action="store_true",
                      help="Return failure if any test was skipped")
    parser.add_option("--default-timeout", type="int",
                      help="Timeout in seconds for most operations")
    parser.add_option("--minsize", type="int",
                      help="Minimum allowable packet size on the dataplane.",
                      default=0)
    parser.add_option("--random-seed", type="int",
                      help="Random number generator seed",
                      default=None)
    parser.add_option("--test-dir", type="string",
                      help="Directory containing tests")
    parser.add_option("--platform-dir", type="string",
                      help="Directory containing platform modules")
    parser.add_option("--profile-dir", type="string",
                      help="Directory containing profile modules")

    # Might need this if other parsers want command line
    # parser.allow_interspersed_args = False
    (options, args) = parser.parse_args()

    config = config_get(options)

    return (config, args)
+
def load_profile(config):
    """
    Import a test profile from the profiles library

    The profile module must define skip_test_list, a list of test names
    that should not be run; die() is called if it does not.

    @param config The oft configuration dictionary; uses config["profile"]
    and config["profile_dir"]
    """

    global profile_mod
    logging.info("Importing profile: %s" % config["profile"])
    try:
        profile_mod = imp.load_module(config["profile"], *imp.find_module(config["profile"], [config["profile_dir"]]))
        if not "skip_test_list" in dir(profile_mod):
            die("Profile did not define skip_test_list")
    except Exception:
        # except Exception (not a bare except) so the SystemExit raised
        # by die() above propagates without a misleading import-failure
        # message being printed first
        logging.info("Could not import profile: %s.py" % config["profile"])
        print("Failed to import profile: %s" % config["profile"])
        raise
+
def logging_setup(config):
    """
    Configure the root logger from the oft configuration

    Logs to config["log_file"] at threshold config["dbg_level"] using a
    fixed timestamp/name/level/message format.
    """
    logging.basicConfig(
        filename=config["log_file"],
        level=config["dbg_level"],
        format="%(asctime)s  %(name)-10s: %(levelname)-8s: %(message)s",
        datefmt="%H:%M:%S")
+
def test_list_generate(config):
    """Generate the list of all known tests indexed by module name

    Conventions:  Test files must implement the function test_set_init

    Test cases are classes that implement runTest

    @param config The oft configuration dictionary
    Stores results in config["all_tests"] (module reference -> list of
    test class names) and config["mod_name_map"] (module name -> module
    reference).
    """

    # Find files under test_dir that define test_set_init at top level
    p1 = Popen(["find", config["test_dir"], "-type","f"], stdout = PIPE)
    p2 = Popen(["xargs", "grep", "-l", "-e", "^def test_set_init"],
                stdin=p1.stdout, stdout=PIPE)

    all_tests = {}
    mod_name_map = {}
    # There's an extra empty entry at the end of the list
    filelist = p2.communicate()[0].split("\n")[:-1]
    for path in filelist:
        basename = os.path.basename(path)
        # Skip editor backup (~) and autosave (#) files.  The original
        # tested path[0] == '#', i.e. the first character of the whole
        # path (the search directory), so autosave files were never
        # actually skipped; test the file name instead.
        if basename[-1:] == '~' or basename[:1] == '#':
            continue
        modname = os.path.splitext(basename)[0]

        try:
            # has_key is deprecated; "in" works on sys.modules directly
            if modname in sys.modules:
                mod = sys.modules[modname]
            else:
                mod = imp.load_module(modname, *imp.find_module(modname, [os.path.dirname(path)]))
        except Exception:
            logging.warning("Could not import file " + path)
            raise

        # Collect classes in the module that derive from unittest.TestCase
        tests = [k for k in dir(mod) if type(getattr(mod, k)) == type and
                                        issubclass(getattr(mod, k), unittest.TestCase)]
        if tests:
            mod_name_map[modname] = mod
            all_tests[mod] = tests

    config["all_tests"] = all_tests
    config["mod_name_map"] = mod_name_map
+
def die(msg, exit_val=1):
    """
    Print and log a fatal message, then exit the process

    @param msg The message to print and log at critical level
    @param exit_val Process exit status (default 1)
    """
    # print(msg) is the py2/py3-compatible form of the original
    # py2-only "print msg" statement; identical output for one argument
    print(msg)
    logging.critical(msg)
    sys.exit(exit_val)
+
def add_test(suite, mod, name):
    """
    Instantiate test class mod.<name> and add it to suite

    @param suite The unittest.TestSuite being built
    @param mod Module reference containing the test class
    @param name Name of the test class within mod
    """
    logging.info("Adding test " + mod.__name__ + "." + name)
    # getattr replaces the original eval("mod." + name)
    suite.addTest(getattr(mod, name)())
+
+def _space_to(n, str):
+    """
+    Generate a string of spaces to achieve width n given string str
+    If length of str >= n, return one space
+    """
+    spaces = n - len(str)
+    if spaces > 0:
+        return " " * spaces
+    return " "
+
def test_prio_get(mod, test):
    """
    Return the priority of a test

    If test is in "skip list" from profile, return the skip value

    If set in the test_prio variable for the module, return
    that value.  Otherwise return 100 (default)
    """
    if test in profile_mod.skip_test_list:
        logging.info("Skipping test %s due to profile" % test)
        return TEST_PRIO_SKIP
    prio_map = getattr(mod, 'test_prio', None)
    if prio_map is not None and test in prio_map:
        return prio_map[test]
    return TEST_PRIO_DEFAULT
+
#
# Main script
#
# Everything below runs at import time: parse the command line,
# discover tests, honor the --list options, build the suite, load the
# platform module, then (under __main__) run the suite.

# Get configuration, set up logging, import platform from file
(config, args) = config_setup(config_default)

# Populates config["all_tests"] and config["mod_name_map"]
test_list_generate(config)
# NOTE(review): oft_config is not referenced below; presumably exported
# for test modules that import this script -- confirm before removing
oft_config = config
+
# Check if test list is requested; display and exit if so
if config["list"]:
    did_print = False
    mod_count = 0
    test_count = 0
    print "\nTest List:"
    for mod in config["all_tests"].keys():
        # Restrict to the requested module unless spec is "all"
        if config["test_spec"] != "all" and \
                config["test_spec"] != mod.__name__:
            continue
        mod_count += 1
        did_print = True
        # First line of the module docstring is used as its description
        desc = mod.__doc__.strip()
        desc = desc.split('\n')[0]
        start_str = "  Module " + mod.__name__ + ": "
        print start_str + _space_to(22, start_str) + desc
        for test in config["all_tests"][mod]:
            try:
                desc = eval('mod.' + test + '.__doc__.strip()')
                desc = desc.split('\n')[0]
            except:
                desc = "No description"
            # Tests with negative priority are flagged with *
            if test_prio_get(mod, test) < 0:
                start_str = "  * " + test + ":"
            else:
                start_str = "    " + test + ":"
            if len(start_str) > 22:
                desc = "\n" + _space_to(22, "") + desc
            print start_str + _space_to(22, start_str) + desc
            test_count += 1
        print
    if not did_print:
        print "No tests found for " + config["test_spec"]
    else:
        print "%d modules shown with a total of %d tests" % \
            (mod_count, test_count)
        print
        print "Tests preceded by * are not run by default"
    print "Tests marked (TP1) after name take --test-params including:"
    print "    'vid=N;strip_vlan=bool;add_vlan=bool'"
    print "Note that --profile may override which tests are run"
    sys.exit(0)
+
# Check if a bare list of test names is requested; display and exit if so
if config["list_test_names"]:
    for mod in config["all_tests"].keys():
        if config["test_spec"] != "all" and \
                config["test_spec"] != mod.__name__:
            continue
        # NOTE(review): desc is computed but never printed in this branch
        desc = mod.__doc__.strip()
        desc = desc.split('\n')[0]
        for test in config["all_tests"][mod]:
            print "%s.%s" % (mod.__name__, test)
    sys.exit(0)
+
# Start logging and record the run boundary in the log
logging_setup(config)
logging.info("++++++++ " + time.asctime() + " ++++++++")

# Sets the module-level profile_mod used by test_prio_get
load_profile(config)

# Generate the test suite
#@todo Decide if multiple suites are ever needed
suite = unittest.TestSuite()
+
#@todo Allow specification of priority to override prio check
if config["test_spec"] == "all":
    for mod in config["all_tests"].keys():
       for test in config["all_tests"][mod]:
           # For now, a way to avoid tests
           if test_prio_get(mod, test) >= 0:
               add_test(suite, mod, test)

else:
    # test_spec is a comma separated list of module, test, or module.test
    for ts_entry in config["test_spec"].split(","):
        parts = ts_entry.split(".")

        if len(parts) == 1: # Either a module or test name
            if ts_entry in config["mod_name_map"].keys():
                mod = config["mod_name_map"][ts_entry]
                for test in config["all_tests"][mod]:
                    if test_prio_get(mod, test) >= 0:
                        add_test(suite, mod, test)
            else: # Search for matching tests
                test_found = False
                for mod in config["all_tests"].keys():
                    if ts_entry in config["all_tests"][mod]:
                        add_test(suite, mod, ts_entry)
                        test_found = True
                if not test_found:
                    die("Could not find module or test: " + ts_entry)

        elif len(parts) == 2: # module.test
            if parts[0] not in config["mod_name_map"]:
                die("Unknown module in test spec: " + ts_entry)
            mod = config["mod_name_map"][parts[0]]
            if parts[1] in config["all_tests"][mod]:
                add_test(suite, mod, parts[1])
            else:
                die("No known test matches: " + ts_entry)

        else:
            die("Bad test spec: " + ts_entry)
+
# Load the platform module; searched in platform_dir, then test_dir
platform_name = config["platform"]
logging.info("Importing platform: " + platform_name)
platform_mod = None
try:
    platform_mod = imp.load_module(platform_name, *imp.find_module(platform_name, [config["platform_dir"], config["test_dir"]]))
except:
    logging.warn("Failed to import " + platform_name + " platform module")
    raise

# The platform must define platform_config_update; it is expected to
# populate config["port_map"] (see module docstring)
try:
    platform_mod.platform_config_update(config)
except:
    logging.warn("Could not run platform host configuration")
    raise

if not config["port_map"]:
    die("Interface port map was not defined by the platform. Exiting.")

logging.debug("Configuration: " + str(config))
logging.info("OF port map: " + str(config["port_map"]))
+
# Init the test sets: give each discovered test module the config
for (modname,mod) in config["mod_name_map"].items():
    try:
        mod.test_set_init(config)
    except:
        logging.warning("Could not run test_set_init for " + modname)
        raise

# Map the debug level to a unittest runner verbosity
if config["dbg_level"] == logging.CRITICAL:
    _verb = 0
elif config["dbg_level"] >= logging.WARNING:
    _verb = 1
else:
    _verb = 2

# Propagate run-wide settings into the helper modules
oftest.ofutils.default_timeout = config["default_timeout"]
testutils.MINSIZE = config['minsize']

# Raw sockets for the dataplane require root unless overridden
if os.getuid() != 0 and not config["allow_user"]:
    print "ERROR: Super-user privileges required. Please re-run with " \
          "sudo or as root."
    exit(1)

if config["random_seed"] is not None:
    logging.info("Random seed: %d" % config["random_seed"])
    random.seed(config["random_seed"])


if __name__ == "__main__":
    logging.info("*** TEST RUN START: " + time.asctime())
    result = unittest.TextTestRunner(verbosity=_verb).run(suite)
    # Report tests skipped via testutils during the run
    if testutils.skipped_test_count > 0:
        ts = " tests"
        if testutils.skipped_test_count == 1: ts = " test"
        logging.info("Skipped " + str(testutils.skipped_test_count) + ts)
        print("Skipped " + str(testutils.skipped_test_count) + ts)
    logging.info("*** TEST RUN END  : " + time.asctime())
    if result.failures or result.errors:
        # exit(1) hangs sometimes
        os._exit(1)
    if testutils.skipped_test_count > 0 and config["fail_skipped"]:
        os._exit(1)
diff --git a/tests/testutils.py b/tests/testutils.py
new file mode 100644
index 0000000..b9e69d4
--- /dev/null
+++ b/tests/testutils.py
@@ -0,0 +1,885 @@
+
+import sys
+import copy
+
+try:
+    import scapy.all as scapy
+except:
+    try:
+        import scapy as scapy
+    except:
+        sys.exit("Need to install scapy for packet parsing")
+
+import oftest.controller as controller
+import oftest.cstruct as ofp
+import oftest.message as message
+import oftest.dataplane as dataplane
+import oftest.action as action
+import oftest.parse as parse
+import logging
+import types
+
# Count of tests skipped during the run; read by oft after the run.
# (A module-level "global" statement is a no-op; kept as-is.)
global skipped_test_count
skipped_test_count = 0

# Some useful defines
IP_ETHERTYPE = 0x800    # Ethertype for IPv4
TCP_PROTOCOL = 0x6      # IP protocol number for TCP
UDP_PROTOCOL = 0x11     # IP protocol number for UDP

# Minimum allowable dataplane packet size; oft sets this from --minsize
MINSIZE = 0
+
def clear_switch(parent, port_list, logger):
    """
    Return the switch to a clean state

    Resets the configuration of every port in port_list, then removes
    all installed flows.

    @param parent Object implementing controller and assert equal
    @param port_list Iterable of OpenFlow port numbers to reset
    @param logger Logging object
    """
    for port_no in port_list:
        clear_port_config(parent, port_no, logger)
    delete_all_flows(parent.controller, logger)
+
def delete_all_flows(ctrl, logger):
    """
    Remove every flow installed on the switch

    Sends a single DELETE flow_mod with all fields wildcarded.

    @param ctrl The controller object for the test
    @param logger Logging object
    @return Result of message_send
    """

    logger.info("Deleting all flows")
    flow_mod = message.flow_mod()
    flow_mod.command = ofp.OFPFC_DELETE
    flow_mod.match.wildcards = ofp.OFPFW_ALL
    flow_mod.out_port = ofp.OFPP_NONE
    flow_mod.buffer_id = 0xffffffff
    return ctrl.message_send(flow_mod)
+
def clear_port_config(parent, port, logger):
    """
    Clear the port configuration (currently only no flood setting)

    @param parent Object implementing controller and assertEqual
    @param port OpenFlow port number to reset
    @param logger Logging object
    """
    rv = port_config_set(parent.controller, port,
                         0, ofp.OFPPC_NO_FLOOD, logger)
    # Bug fix: original used self.assertEqual, but this is a module-level
    # function with no self (NameError at runtime); assert via parent
    parent.assertEqual(rv, 0, "Failed to reset port config")
+
def required_wildcards(parent):
    """
    Return the wildcard bits required by the switch under test

    Reads the 'required_wildcards' test param; 'l3-l4' maps to the full
    set of L3/L4 wildcard bits, anything else to none.
    """
    which = test_param_get(parent.config, 'required_wildcards',
                           default='default')
    if which != 'l3-l4':
        return 0
    return (ofp.OFPFW_NW_SRC_ALL | ofp.OFPFW_NW_DST_ALL | ofp.OFPFW_NW_TOS
            | ofp.OFPFW_NW_PROTO | ofp.OFPFW_TP_SRC | ofp.OFPFW_TP_DST)
+
def simple_tcp_packet(pktlen=100, 
                      dl_dst='00:01:02:03:04:05',
                      dl_src='00:06:07:08:09:0a',
                      dl_vlan_enable=False,
                      dl_vlan=0,
                      dl_vlan_pcp=0,
                      dl_vlan_cfi=0,
                      ip_src='192.168.0.1',
                      ip_dst='192.168.0.2',
                      ip_tos=0,
                      tcp_sport=1234,
                      tcp_dport=80
                      ):
    """
    Return a simple dataplane TCP packet

    Supports a few parameters:
    @param pktlen Length of packet in bytes w/o CRC
    @param dl_dst Destination MAC
    @param dl_src Source MAC
    @param dl_vlan_enable True if the packet is with vlan, False otherwise
    @param dl_vlan VLAN ID
    @param dl_vlan_pcp VLAN priority
    @param dl_vlan_cfi VLAN CFI bit
    @param ip_src IP source
    @param ip_dst IP destination
    @param ip_tos IP ToS
    @param tcp_sport TCP source port
    @param tcp_dport TCP destination port

    Generates a simple TCP request.  Users
    shouldn't assume anything about this packet other than that
    it is a valid ethernet/IP/TCP frame.
    """

    # Enforce the module-wide minimum dataplane packet size
    if MINSIZE > pktlen:
        pktlen = MINSIZE

    # Note Dot1Q.id is really CFI
    if (dl_vlan_enable):
        pkt = scapy.Ether(dst=dl_dst, src=dl_src)/ \
            scapy.Dot1Q(prio=dl_vlan_pcp, id=dl_vlan_cfi, vlan=dl_vlan)/ \
            scapy.IP(src=ip_src, dst=ip_dst, tos=ip_tos)/ \
            scapy.TCP(sport=tcp_sport, dport=tcp_dport)
    else:
        pkt = scapy.Ether(dst=dl_dst, src=dl_src)/ \
            scapy.IP(src=ip_src, dst=ip_dst, tos=ip_tos)/ \
            scapy.TCP(sport=tcp_sport, dport=tcp_dport)

    # Pad the payload with "D" bytes up to the requested length
    pkt = pkt/("D" * (pktlen - len(pkt)))

    return pkt
+
def simple_icmp_packet(pktlen=60, 
                      dl_dst='00:01:02:03:04:05',
                      dl_src='00:06:07:08:09:0a',
                      dl_vlan_enable=False,
                      dl_vlan=0,
                      dl_vlan_pcp=0,
                      ip_src='192.168.0.1',
                      ip_dst='192.168.0.2',
                      ip_tos=0,
                      icmp_type=8,
                      icmp_code=0
                      ):
    """
    Return a simple ICMP packet

    Supports a few parameters:
    @param pktlen Length of packet in bytes w/o CRC
    @param dl_dst Destination MAC
    @param dl_src Source MAC
    @param dl_vlan_enable True if the packet is with vlan, False otherwise
    @param dl_vlan VLAN ID
    @param dl_vlan_pcp VLAN priority
    @param ip_src IP source
    @param ip_dst IP destination
    @param ip_tos IP ToS
    @param icmp_type ICMP type
    @param icmp_code ICMP code

    Generates a simple ICMP ECHO REQUEST.  Users
    shouldn't assume anything about this packet other than that
    it is a valid ethernet/ICMP frame.
    """

    # Enforce the module-wide minimum dataplane packet size
    if MINSIZE > pktlen:
        pktlen = MINSIZE

    if (dl_vlan_enable):
        pkt = scapy.Ether(dst=dl_dst, src=dl_src)/ \
            scapy.Dot1Q(prio=dl_vlan_pcp, id=0, vlan=dl_vlan)/ \
            scapy.IP(src=ip_src, dst=ip_dst, tos=ip_tos)/ \
            scapy.ICMP(type=icmp_type, code=icmp_code)
    else:
        pkt = scapy.Ether(dst=dl_dst, src=dl_src)/ \
            scapy.IP(src=ip_src, dst=ip_dst, tos=ip_tos)/ \
            scapy.ICMP(type=icmp_type, code=icmp_code)

    # Pad the payload with "0" characters up to the requested length
    pkt = pkt/("0" * (pktlen - len(pkt)))

    return pkt
+
def simple_eth_packet(pktlen=60,
                      dl_dst='00:01:02:03:04:05',
                      dl_src='01:80:c2:00:00:00',
                      dl_type=0x88cc):
    """
    Return a simple Ethernet frame with no L3 payload

    @param pktlen Length of packet in bytes w/o CRC
    @param dl_dst Destination MAC
    @param dl_src Source MAC
    @param dl_type Ethertype (default 0x88cc; presumably chosen as LLDP
    -- confirm with callers)
    """

    # Enforce the module-wide minimum dataplane packet size
    if MINSIZE > pktlen:
        pktlen = MINSIZE

    pkt = scapy.Ether(dst=dl_dst, src=dl_src, type=dl_type)

    # Pad the payload with "0" characters up to the requested length
    pkt = pkt/("0" * (pktlen - len(pkt)))

    return pkt
+
def do_barrier(ctrl):
    """
    Send a barrier request and wait for the reply

    @param ctrl The controller object for the test
    @return 0 on success, -1 if no response arrived
    """
    request = message.barrier_request()
    (resp, pkt) = ctrl.transact(request)
    # We'll trust the transaction processing in the controller that xid matched
    if not resp:
        return -1
    return 0
+
def port_config_get(controller, port_no, logger):
    """
    Get a port's configuration

    Gets the switch feature configuration and grabs one port's
    configuration

    @returns (hwaddr, config, advert) The hwaddress, configuration and
    advertised values; (None, None, None) if the request fails or the
    port is not found
    """
    request = message.features_request()
    reply, pkt = controller.transact(request)
    if reply is None:
        logger.warn("Get feature request failed")
        return None, None, None
    # Bug fix: reply.show() was originally called before the None check
    # above, raising AttributeError when the transaction timed out
    logger.debug(reply.show())
    for port in reply.ports:
        if port.port_no == port_no:
            return (port.hw_addr, port.config, port.advertised)

    logger.warn("Did not find port number for port config")
    return None, None, None
+
def port_config_set(controller, port_no, config, mask, logger):
    """
    Set the port configuration according the given parameters

    Gets the switch feature configuration and updates one port's
    configuration value according to config and mask

    @return Result of message_send, or -1 if the feature request fails
    or port_no is not present on the switch
    """
    logger.info("Setting port " + str(port_no) + " to config " + str(config))
    request = message.features_request()
    reply, pkt = controller.transact(request)
    if reply is None:
        return -1
    logger.debug(reply.show())
    # Bug fix: the original "idx >= len(reply.ports)" test could never be
    # true after "for idx in range(len(reply.ports))", so an unknown
    # port_no silently reconfigured the last port in the list (and an
    # empty port list raised NameError on idx)
    port = None
    for p in reply.ports:
        if p.port_no == port_no:
            port = p
            break
    if port is None:
        return -1
    mod = message.port_mod()
    mod.port_no = port_no
    mod.hw_addr = port.hw_addr
    mod.config = config
    mod.mask = mask
    mod.advertise = port.advertised
    rv = controller.message_send(mod)
    return rv
+
def receive_pkt_check(dp, pkt, yes_ports, no_ports, assert_if, logger,
                      config):
    """
    Check for proper receive packets across all ports
    @param dp The dataplane object
    @param pkt Expected packet; may be None if yes_ports is empty
    @param yes_ports Set or list of ports that should receive packet
    @param no_ports Set or list of ports that should not receive packet
    @param assert_if Object that implements assertXXX
    @param logger Logging object
    @param config The oft configuration dictionary; may be None
    """
    # Under --relax, hand poll() the expected packet so unrelated traffic
    # on the port is skipped rather than failing the comparison
    exp_pkt_arg = None
    if config and config["relax"]:
        exp_pkt_arg = pkt

    for ofport in yes_ports:
        logger.debug("Checking for pkt on port " + str(ofport))
        (rcv_port, rcv_pkt, pkt_time) = dp.poll(
            port_number=ofport, exp_pkt=exp_pkt_arg)
        assert_if.assertTrue(rcv_pkt is not None, 
                             "Did not receive pkt on " + str(ofport))
        # Log both packets before asserting so mismatches are diagnosable
        if not dataplane.match_exp_pkt(pkt, rcv_pkt):
            logger.debug("Sent %s" % format_packet(pkt))
            logger.debug("Resp %s" % format_packet(rcv_pkt))
        assert_if.assertTrue(dataplane.match_exp_pkt(pkt, rcv_pkt),
                             "Response packet does not match send packet " +
                             "on port " + str(ofport))

    for ofport in no_ports:
        logger.debug("Negative check for pkt on port " + str(ofport))
        # Short timeout: absence of a packet can only be shown by waiting
        (rcv_port, rcv_pkt, pkt_time) = dp.poll(
            port_number=ofport, timeout=1, exp_pkt=exp_pkt_arg)
        assert_if.assertTrue(rcv_pkt is None, 
                             "Unexpected pkt on port " + str(ofport))
+
+
def receive_pkt_verify(parent, egr_ports, exp_pkt, ing_port):
    """
    Receive a packet on each egress port and verify it matches exp_pkt.

    @param parent Must implement dataplane, config, logger, assertTrue
    and assertEqual
    @param egr_ports A single port or list of ports
    @param exp_pkt The packet expected on every egress port
    @param ing_port Ingress port, used when OFPP_IN_PORT is in the list
    """
    # In relaxed mode, let the dataplane poll filter on the expected packet
    exp_pkt_arg = exp_pkt if parent.config["relax"] else None

    # Normalize to a list so a single port and a port list look the same
    port_list = egr_ports if isinstance(egr_ports, list) else [egr_ports]

    # Expect one copy of the packet on every port in the list
    for egr_port in port_list:
        # OFPP_IN_PORT means the packet comes back out the ingress port
        check_port = ing_port if egr_port == ofp.OFPP_IN_PORT else egr_port
        rcv_port, rcv_pkt, pkt_time = parent.dataplane.poll(
            port_number=check_port, exp_pkt=exp_pkt_arg)

        if rcv_pkt is None:
            parent.logger.error("ERROR: No packet received from " + 
                                str(check_port))

        parent.assertTrue(rcv_pkt is not None,
                          "Did not receive packet port " + str(check_port))
        parent.logger.debug("Packet len " + str(len(rcv_pkt)) + " in on " + 
                            str(rcv_port))

        # Log both payloads in hex before asserting, to aid debugging
        if str(exp_pkt) != str(rcv_pkt):
            parent.logger.error("ERROR: Packet match failed.")
            parent.logger.debug("Expected len " + str(len(exp_pkt)) + ": "
                                + str(exp_pkt).encode('hex'))
            parent.logger.debug("Received len " + str(len(rcv_pkt)) + ": "
                                + str(rcv_pkt).encode('hex'))
        parent.assertEqual(str(exp_pkt), str(rcv_pkt),
                           "Packet match error on port " + str(check_port))
+
def match_verify(parent, req_match, res_match):
    """
    Verify flow matches agree; if they disagree, report where

    @param parent Must implement assertEqual
    @param req_match The match structure from the request
    @param res_match The match structure echoed back by the switch

    Use str() to ensure content is compared and not pointers
    """

    parent.assertEqual(req_match.wildcards, res_match.wildcards,
                       'Match failed: wildcards: ' + hex(req_match.wildcards) +
                       " != " + hex(res_match.wildcards))
    parent.assertEqual(req_match.in_port, res_match.in_port,
                       'Match failed: in_port: ' + str(req_match.in_port) +
                       " != " + str(res_match.in_port))
    parent.assertEqual(str(req_match.dl_src), str(res_match.dl_src),
                       'Match failed: dl_src: ' + str(req_match.dl_src) +
                       " != " + str(res_match.dl_src))
    parent.assertEqual(str(req_match.dl_dst), str(res_match.dl_dst),
                       'Match failed: dl_dst: ' + str(req_match.dl_dst) +
                       " != " + str(res_match.dl_dst))
    parent.assertEqual(req_match.dl_vlan, res_match.dl_vlan,
                       'Match failed: dl_vlan: ' + str(req_match.dl_vlan) +
                       " != " + str(res_match.dl_vlan))
    parent.assertEqual(req_match.dl_vlan_pcp, res_match.dl_vlan_pcp,
                       'Match failed: dl_vlan_pcp: ' + 
                       str(req_match.dl_vlan_pcp) + " != " + 
                       str(res_match.dl_vlan_pcp))
    parent.assertEqual(req_match.dl_type, res_match.dl_type,
                       'Match failed: dl_type: ' + str(req_match.dl_type) +
                       " != " + str(res_match.dl_type))

    # Network-layer fields are only compared when the ethertype is not
    # wildcarded and the packet is IP (IP_ETHERTYPE defined elsewhere
    # in this file)
    if (not(req_match.wildcards & ofp.OFPFW_DL_TYPE)
        and (req_match.dl_type == IP_ETHERTYPE)):
        parent.assertEqual(req_match.nw_tos, res_match.nw_tos,
                           'Match failed: nw_tos: ' + str(req_match.nw_tos) +
                           " != " + str(res_match.nw_tos))
        parent.assertEqual(req_match.nw_proto, res_match.nw_proto,
                           'Match failed: nw_proto: ' + str(req_match.nw_proto) +
                           " != " + str(res_match.nw_proto))
        parent.assertEqual(req_match.nw_src, res_match.nw_src,
                           'Match failed: nw_src: ' + str(req_match.nw_src) +
                           " != " + str(res_match.nw_src))
        parent.assertEqual(req_match.nw_dst, res_match.nw_dst,
                           'Match failed: nw_dst: ' + str(req_match.nw_dst) +
                           " != " + str(res_match.nw_dst))

        # Transport ports only apply when the IP protocol is not
        # wildcarded and is TCP or UDP
        if (not(req_match.wildcards & ofp.OFPFW_NW_PROTO)
            and ((req_match.nw_proto == TCP_PROTOCOL)
                 or (req_match.nw_proto == UDP_PROTOCOL))):
            parent.assertEqual(req_match.tp_src, res_match.tp_src,
                               'Match failed: tp_src: ' + 
                               str(req_match.tp_src) +
                               " != " + str(res_match.tp_src))
            parent.assertEqual(req_match.tp_dst, res_match.tp_dst,
                               'Match failed: tp_dst: ' + 
                               str(req_match.tp_dst) +
                               " != " + str(res_match.tp_dst))
+
def flow_removed_verify(parent, request=None, pkt_count=-1, byte_count=-1):
    """
    Receive a flow removed msg and verify it matches expected

    @param parent Must implement controller, assertEqual, assertTrue
    @param request If not None, verify the flow removed message against
    this flow mod request (cookie, match, priority, counters)
    @param pkt_count If >= 0, verify packet count
    @param byte_count If >= 0, verify byte count
    """
    # Wait up to 2 seconds for the flow removed message
    (response, raw) = parent.controller.poll(ofp.OFPT_FLOW_REMOVED, 2)
    parent.assertTrue(response is not None, 'No flow removed message received')

    if request is None:
        return

    parent.assertEqual(request.cookie, response.cookie,
                       "Flow removed cookie error: " +
                       hex(request.cookie) + " != " + hex(response.cookie))

    req_match = request.match
    res_match = response.match
    # BUG FIX: was verifyMatchField(req_match, res_match), an undefined
    # name (NameError); match_verify is the checker defined in this file
    match_verify(parent, req_match, res_match)

    if (req_match.wildcards != 0):
        # BUG FIX: was str(request,priority) — comma typo raised
        # TypeError instead of formatting the priority
        parent.assertEqual(request.priority, response.priority,
                           'Flow remove prio mismatch: ' + 
                           str(request.priority) + " != " + 
                           str(response.priority))
        parent.assertEqual(response.reason, ofp.OFPRR_HARD_TIMEOUT,
                           'Flow remove reason is not HARD TIMEOUT:' +
                           str(response.reason))
        if pkt_count >= 0:
            parent.assertEqual(response.packet_count, pkt_count,
                               'Flow removed failed, packet count: ' + 
                               str(response.packet_count) + " != " +
                               str(pkt_count))
        if byte_count >= 0:
            parent.assertEqual(response.byte_count, byte_count,
                               'Flow removed failed, byte count: ' + 
                               str(response.byte_count) + " != " + 
                               str(byte_count))
+
def packet_to_flow_match(parent, packet):
    """
    Create a flow match for packet, widened by the platform's
    required wildcards.
    """
    flow_match = parse.packet_to_flow_match(packet)
    flow_match.wildcards |= required_wildcards(parent)
    return flow_match
+
def flow_msg_create(parent, pkt, ing_port=None, action_list=None, wildcards=None,
               egr_ports=None, egr_queue=None, check_expire=False, in_band=False):
    """
    Create a flow mod message matching the given packet.

    Match on packet with given wildcards.
    See flow_match_test for other parameter descriptions.
    @param parent Must implement assertTrue and logger
    @param pkt Packet from which the flow match is derived
    @param ing_port Value assigned to match.in_port (may be None)
    @param action_list Extra actions added before the output/enqueue actions
    @param wildcards Match wildcards; defaults to required_wildcards(parent)
    @param egr_ports None (drop), single port or list of ports
    @param egr_queue if not None, make the output an enqueue action
    @param check_expire If True, set OFPFF_SEND_FLOW_REM and a 1 second
    hard timeout so a flow removed message can be verified
    @param in_band if True, do not wildcard ingress port
    @returns The flow_mod message
    """
    match = parse.packet_to_flow_match(pkt)
    parent.assertTrue(match is not None, "Flow match from pkt failed")
    if wildcards is None:
        wildcards = required_wildcards(parent)
    if in_band:
        # In-band control requires an exact match on the ingress port
        wildcards &= ~ofp.OFPFW_IN_PORT
    match.wildcards = wildcards
    match.in_port = ing_port

    # Normalize egr_ports to a list (single port or None becomes [port]/[None])
    if type(egr_ports) == type([]):
        egr_port_list = egr_ports
    else:
        egr_port_list = [egr_ports]

    request = message.flow_mod()
    request.match = match
    # 0xffffffff = OFP_NO_BUFFER: packet data is not buffered on the switch
    request.buffer_id = 0xffffffff
    if check_expire:
        request.flags |= ofp.OFPFF_SEND_FLOW_REM
        request.hard_timeout = 1

    # Caller-supplied actions go first, before any output/enqueue
    if action_list is not None:
        for act in action_list:
            parent.logger.debug("Adding action " + act.show())
            rv = request.actions.add(act)
            parent.assertTrue(rv, "Could not add action" + act.show())

    # Set up output/enqueue action if directed
    if egr_queue is not None:
        parent.assertTrue(egr_ports is not None, "Egress port not set")
        # NOTE(review): a single enqueue action object is created once and
        # re-added (after mutation) for each port; this relies on
        # actions.add copying or serializing the object per add — confirm
        act = action.action_enqueue()
        for egr_port in egr_port_list:
            act.port = egr_port
            act.queue_id = egr_queue
            rv = request.actions.add(act)
            parent.assertTrue(rv, "Could not add enqueue action " + 
                              str(egr_port) + " Q: " + str(egr_queue))
    elif egr_ports is not None:
        # One output action per egress port (fresh object each time)
        for egr_port in egr_port_list:
            act = action.action_output()
            act.port = egr_port
            rv = request.actions.add(act)
            parent.assertTrue(rv, "Could not add output action " + 
                              str(egr_port))

    parent.logger.debug(request.show())

    return request
+
def flow_msg_install(parent, request, clear_table_override=None):
    """
    Install a flow mod message in the switch

    @param parent Must implement controller, assertEqual, assertTrue
    @param request The request, all set to go
    @param clear_table_override If not None, overrides the 'clear_table'
    test param controlling whether the flow table is cleared first
    """

    # Default comes from the 'clear_table' test param (true if unset)
    clear_table = test_param_get(parent.config, 'clear_table', default=True)
    if clear_table_override is not None:
        clear_table = clear_table_override

    if clear_table:
        parent.logger.debug("Clear flow table")
        rc = delete_all_flows(parent.controller, parent.logger)
        parent.assertEqual(rc, 0, "Failed to delete all flows")
        parent.assertEqual(do_barrier(parent.controller), 0, "Barrier failed")

    parent.logger.debug("Insert flow")
    rv = parent.controller.message_send(request)
    parent.assertTrue(rv != -1, "Error installing flow mod")
    # Barrier ensures the flow mod has been processed before returning
    parent.assertEqual(do_barrier(parent.controller), 0, "Barrier failed")
+
def flow_match_test_port_pair(parent, ing_port, egr_ports, wildcards=None,
                              dl_vlan=-1, pkt=None, exp_pkt=None,
                              action_list=None, check_expire=False):
    """
    Flow match test on single TCP packet

    Install a flow from ing_port to egr_ports, send a packet in and
    verify it arrives on every egress port.
    @param egr_ports A single port or list of ports
    See flow_match_test for the remaining parameter descriptions
    """

    if wildcards is None:
        wildcards = required_wildcards(parent)
    parent.logger.info("Pkt match test: " + str(ing_port) + " to " + 
                       str(egr_ports))
    parent.logger.debug("  WC: " + hex(wildcards) + " vlan: " + str(dl_vlan) +
                        " expire: " + str(check_expire))
    if pkt is None:
        # Tag the packet only when a VLAN id was requested
        pkt = simple_tcp_packet(dl_vlan_enable=(dl_vlan >= 0), dl_vlan=dl_vlan)

    flow_mod = flow_msg_create(parent, pkt, ing_port=ing_port,
                               wildcards=wildcards, egr_ports=egr_ports,
                               action_list=action_list)
    flow_msg_install(parent, flow_mod)

    parent.logger.debug("Send packet: " + str(ing_port) + " to " + 
                        str(egr_ports))
    parent.dataplane.send(ing_port, str(pkt))

    # The sent packet is the expected packet unless told otherwise
    expected = pkt if exp_pkt is None else exp_pkt
    receive_pkt_verify(parent, egr_ports, expected, ing_port)

    if check_expire:
        #@todo Not all HW supports both pkt and byte counters
        flow_removed_verify(parent, flow_mod, pkt_count=1, byte_count=len(pkt))
+
def get_egr_list(parent, of_ports, how_many, exclude_list=None):
    """
    Generate a list of ports avoiding those in the exclude list

    @param parent Supplies logger (only used on failure)
    @param of_ports List of OF port numbers
    @param how_many Number of ports to be added to the list
    @param exclude_list List of ports not to be used (default: none)
    @returns A list of exactly how_many ports, or an empty list if
    unable to find enough ports
    """
    if how_many == 0:
        return []
    # Fixed: previously a mutable default argument (exclude_list=[])
    if exclude_list is None:
        exclude_list = []

    # Ports are taken in of_ports order, skipping excluded ones
    candidates = [port for port in of_ports if port not in exclude_list]
    if len(candidates) < how_many:
        parent.logger.debug("Could not generate enough egress ports for test")
        return []
    return candidates[:how_many]
+    
def flow_match_test(parent, port_map, wildcards=None, dl_vlan=-1, pkt=None, 
                    exp_pkt=None, action_list=None, check_expire=False, 
                    max_test=0, egr_count=1, ing_port=False):
    """
    Run flow_match_test_port_pair on all port pairs

    @param max_test If > 0 no more than this number of tests are executed.
    @param parent Must implement controller, dataplane, assertTrue, assertEqual
    and logger
    @param pkt If not None, use this packet for ingress
    @param wildcards For flow match entry
    @param dl_vlan If not -1, and pkt is None, create a pkt w/ VLAN tag
    @param exp_pkt If not None, use this as the expected output pkt; else use pkt
    @param action_list Additional actions to add to flow mod
    @param check_expire Check for flow expiration message
    @param egr_count Number of egress ports; -1 means get from config w/ dflt 2
    @param ing_port If True, also send back out the ingress port
    """
    if wildcards is None:
        wildcards = required_wildcards(parent)
    of_ports = sorted(port_map.keys())
    parent.assertTrue(len(of_ports) > 1, "Not enough ports for test")
    test_count = 0

    if egr_count == -1:
        egr_count = test_param_get(parent.config, 'egr_count', default=2)

    for ingress_port in of_ports:
        egr_ports = get_egr_list(parent, of_ports, egr_count,
                                 exclude_list=[ingress_port])
        if ing_port:
            egr_ports.append(ofp.OFPP_IN_PORT)
        if len(egr_ports) == 0:
            parent.assertTrue(0, "Failed to generate egress port list")

        flow_match_test_port_pair(parent, ingress_port, egr_ports,
                                  wildcards=wildcards, dl_vlan=dl_vlan,
                                  pkt=pkt, exp_pkt=exp_pkt,
                                  action_list=action_list,
                                  check_expire=check_expire)
        test_count += 1
        # BUG FIX: was 'test_count > max_test', which ran max_test+1
        # tests, contradicting the documented "no more than" contract
        if (max_test > 0) and (test_count >= max_test):
            parent.logger.info("Ran " + str(test_count) + " tests; exiting")
            return
+
def test_param_get(config, key, default=None):
    """
    Return value passed via test-params if present

    @param config The configuration structure for OFTest
    @param key The lookup key
    @param default Default value to use if not found

    If the pair 'key=val' appeared in the string passed to --test-params
    on the command line, return val (as interpreted by exec).  Otherwise
    return default value.

    WARNING: TEST PARAMETERS MUST BE PYTHON IDENTIFIERS; 
    eg egr_count, not egr-count.

    SECURITY NOTE(review): the test-params string is exec'd verbatim,
    so it can run arbitrary code; only pass trusted values.
    """
    # Execute the params string so each key=val pair becomes a local
    # variable (Python 2 exec statement); any failure yields the default
    try:
        exec config["test_params"]
    except:
        return default

    # Read the requested binding back out of the local namespace;
    # an unknown key raises NameError and falls back to the default
    s = "val = " + str(key)
    try:
        exec s
        return val
    except:
        return default
+
def action_generate(parent, field_to_mod, mod_field_vals):
    """
    Create an action to modify the field indicated in field_to_mod

    @param parent Must implement assertTrue
    @param field_to_mod The field to modify as a string name
    @param mod_field_vals Hash of values to use for modified values
    @returns The action object, or None when no action is required
    """

    # Packet length changes need no explicit action
    if field_to_mod in ['pktlen']:
        return None

    if field_to_mod == 'dl_vlan_enable':
        # Stripping the tag is an action; adding a tag is handled by the
        # dl_vlan field, so that case yields None here
        if not mod_field_vals['dl_vlan_enable']:
            return action.action_strip_vlan()
        return None

    # (constructor name, attribute to set, value converter or None)
    dispatch = {
        'dl_dst':      ('action_set_dl_dst',   'dl_addr',  parse.parse_mac),
        'dl_src':      ('action_set_dl_src',   'dl_addr',  parse.parse_mac),
        'dl_vlan':     ('action_set_vlan_vid', 'vlan_vid', None),
        'dl_vlan_pcp': ('action_set_vlan_pcp', 'vlan_pcp', None),
        'ip_src':      ('action_set_nw_src',   'nw_addr',  parse.parse_ip),
        'ip_dst':      ('action_set_nw_dst',   'nw_addr',  parse.parse_ip),
        'ip_tos':      ('action_set_nw_tos',   'nw_tos',   None),
        'tcp_sport':   ('action_set_tp_src',   'tp_port',  None),
        'tcp_dport':   ('action_set_tp_dst',   'tp_port',  None),
    }

    if field_to_mod not in dispatch:
        parent.assertTrue(0, "Unknown field to modify: " + str(field_to_mod))
        return None

    ctor_name, attr_name, convert = dispatch[field_to_mod]
    act = getattr(action, ctor_name)()
    value = mod_field_vals[field_to_mod]
    if convert is not None:
        value = convert(value)
    setattr(act, attr_name, value)
    return act
+
def pkt_action_setup(parent, start_field_vals=None, mod_field_vals=None,
                     mod_fields=None, check_test_params=False):
    """
    Set up the ingress and expected packet and action list for a test

    @param parent Must implement assertTrue, config hash and logger
    @param start_field_vals Field values to use for ingress packet (optional)
    @param mod_field_vals Field values to use for modified packet (optional)
    @param mod_fields The list of fields to be modified by the switch in
    the test; appended to in place when test params add/strip a VLAN
    @param check_test_params If True, will check the parameters vid,
    add_vlan and strip_vlan from the command line.

    Returns a triple:  pkt-to-send, expected-pkt, action-list
    """
    # Fixed: mutable default arguments.  mod_fields previously defaulted
    # to {}, which crashed on .append() whenever add_vlan/strip_vlan test
    # params were set with the default argument.
    if start_field_vals is None:
        start_field_vals = {}
    if mod_field_vals is None:
        mod_field_vals = {}
    if mod_fields is None:
        mod_fields = []

    new_actions = []

    # Baseline field values for the ingress packet
    base_pkt_params = {
        'pktlen': 100,
        'dl_dst': '00:DE:F0:12:34:56',
        'dl_src': '00:23:45:67:89:AB',
        'dl_vlan_enable': False,
        'dl_vlan': 2,
        'dl_vlan_pcp': 0,
        'ip_src': '192.168.0.1',
        'ip_dst': '192.168.0.2',
        'ip_tos': 0,
        'tcp_sport': 1234,
        'tcp_dport': 80,
    }
    base_pkt_params.update(start_field_vals)

    # Field values after modification by the switch
    mod_pkt_params = {
        'pktlen': 100,
        'dl_dst': '00:21:0F:ED:CB:A9',
        'dl_src': '00:ED:CB:A9:87:65',
        'dl_vlan_enable': False,
        'dl_vlan': 3,
        'dl_vlan_pcp': 7,
        'ip_src': '10.20.30.40',
        'ip_dst': '50.60.70.80',
        'ip_tos': 0xf0,
        'tcp_sport': 4321,
        'tcp_dport': 8765,
    }
    mod_pkt_params.update(mod_field_vals)

    # Check for test param modifications
    if check_test_params:
        add_vlan = test_param_get(parent.config, 'add_vlan')
        strip_vlan = test_param_get(parent.config, 'strip_vlan')
        vid = test_param_get(parent.config, 'vid')

        if add_vlan and strip_vlan:
            parent.assertTrue(0, "Add and strip VLAN both specified")

        if vid:
            base_pkt_params['dl_vlan_enable'] = True
            base_pkt_params['dl_vlan'] = vid
            if 'dl_vlan' in mod_fields:
                mod_pkt_params['dl_vlan'] = vid + 1

        if add_vlan:
            # Ingress untagged, egress tagged: packet grows by 4 bytes
            base_pkt_params['dl_vlan_enable'] = False
            mod_pkt_params['dl_vlan_enable'] = True
            mod_pkt_params['pktlen'] = base_pkt_params['pktlen'] + 4
            mod_fields.append('pktlen')
            mod_fields.append('dl_vlan_enable')
            if 'dl_vlan' not in mod_fields:
                mod_fields.append('dl_vlan')
        elif strip_vlan:
            # Ingress tagged, egress untagged: packet shrinks by 4 bytes
            base_pkt_params['dl_vlan_enable'] = True
            mod_pkt_params['dl_vlan_enable'] = False
            mod_pkt_params['pktlen'] = base_pkt_params['pktlen'] - 4
            mod_fields.append('dl_vlan_enable')
            mod_fields.append('pktlen')

    # Build the ingress packet
    ingress_pkt = simple_tcp_packet(**base_pkt_params)

    # Build the expected packet, modifying the indicated fields
    for item in mod_fields:
        base_pkt_params[item] = mod_pkt_params[item]
        act = action_generate(parent, item, mod_pkt_params)
        if act:
            new_actions.append(act)

    expected_pkt = simple_tcp_packet(**base_pkt_params)

    return (ingress_pkt, expected_pkt, new_actions)
+
def flow_mod_gen(port_map, in_band):
    """
    Generate a simple "drop" flow mod (a flow mod with no actions)

    @param port_map Map of OF port number to interface name
    @param in_band If True, only match packets from the first test port;
    otherwise wildcard everything including the ingress port
    """
    request = message.flow_mod()
    if in_band:
        request.match.wildcards = ofp.OFPFW_ALL - ofp.OFPFW_IN_PORT
        # Grab an arbitrary "first" port from the map
        for of_port, ifname in port_map.items():
            break
        request.match.in_port = of_port
    else:
        request.match.wildcards = ofp.OFPFW_ALL
    # 0xffffffff = OFP_NO_BUFFER
    request.buffer_id = 0xffffffff
    return request
+
def skip_message_emit(parent, s):
    """
    Print out a 'skipped' message to stderr

    @param s The string to print out to the log file
    @param parent Must implement config and logger objects
    """
    global skipped_test_count

    skipped_test_count += 1
    parent.logger.info("Skipping: " + s)
    # Verbose marker at higher debug levels, compact one otherwise
    if parent.config["dbg_level"] < logging.WARNING:
        marker = "(skipped) "
    else:
        marker = "(S)"
    sys.stderr.write(marker)
+
+
def all_stats_get(parent):
    """
    Get the aggregate stats for all flows in the table

    @param parent Test instance with controller connection and assert
    @returns dict with keys flows, packets, bytes, active (flows),
    lookups, matched
    """
    # Aggregate stats over every flow in every table
    stat_req = message.aggregate_stats_request()
    stat_req.match = ofp.ofp_match()
    stat_req.match.wildcards = ofp.OFPFW_ALL
    stat_req.table_id = 0xff
    stat_req.out_port = ofp.OFPP_NONE

    reply, pkt = parent.controller.transact(stat_req)
    parent.assertTrue(len(reply.stats) == 1, "Did not receive flow stats reply")

    entry = reply.stats[0]
    rv = {"flows": entry.flow_count,
          "packets": entry.packet_count,
          "bytes": entry.byte_count,
          "active": 0,
          "lookups": 0,
          "matched": 0}

    # Table stats are summed across all tables
    table_req = message.table_stats_request()
    reply, pkt = parent.controller.transact(table_req)

    for table in reply.stats:
        rv["active"] += table.active_count
        rv["lookups"] += table.lookup_count
        rv["matched"] += table.matched_count

    return rv
+
# Translation table used by hex_dump_buffer: printable characters map to
# themselves (repr of such a char is 3 chars, e.g. 'a'), everything else
# maps to '.'
FILTER=''.join([(len(repr(chr(x)))==3) and chr(x) or '.' 
                for x in range(256)])
+
def hex_dump_buffer(src, length=16):
    """
    Convert src to a hex dump string and return the string

    @param src The source buffer (a Python 2 byte string)
    @param length The number of bytes shown in each line
    @returns A string showing the hex dump
    """
    lines = ["\n"]
    for offset in xrange(0, len(src), length):
        chunk = src[offset:offset + length]
        # Hex column, then a printable rendering (non-printables as '.')
        hex_str = ' '.join(["%02x" % ord(c) for c in chunk])
        printable = ''.join(["%s" % ((ord(c) <= 127 and
                                      FILTER[ord(c)]) or '.') for c in chunk])
        lines.append("%04x  %-*s  %s\n" % (offset, length*3, hex_str, printable))
    return ''.join(lines)
+
def format_packet(pkt):
    """Return a loggable summary of pkt: its length plus a hex dump."""
    data = str(pkt)
    return "Packet length %d \n%s" % (len(data), hex_dump_buffer(data))