#!/usr/bin/env python
"""
@package oft
OpenFlow test framework top level script
This script is the entry point for running OpenFlow tests
using the OFT framework.
The global configuration is passed around in a dictionary
generally called config. The keys have the following
significance.
<pre>
platform : String identifying the target platform
controller_host : Host on which test controller is running (for sockets)
controller_port : Port on which test controller listens for switch cxn
test_dir : (TBD) Directory to search for test files (default .)
test_spec : (TBD) Specification of test(s) to run
log_file : Filename for test logging
list : Boolean: List all tests and exit
debug : String giving debug level (info, warning, error...)
</pre>
See config_default below for the default values.
The following are stored in the config dictionary, but are not currently
configurable through the command line.
<pre>
dbg_level : logging module value of debug level
port_map : Map of dataplane OpenFlow port to OS interface names
</pre>
Each test may be assigned a priority by setting the "priority" property
in the class definition. For now, the only use of this is to avoid
automatic inclusion of tests into the default list. This is done by
setting the priority value less than 0. Eventually we may add ordering
of test execution by test priority.
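For example, a test can opt itself out of the default list (class name is
illustrative):
<pre>
class ExperimentalTest(unittest.TestCase):
    priority = -1   # below 0: not included in the default test list
</pre>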
To add a test to the system, either edit an existing test case file (such as
basic.py) to add a test class which inherits from unittest.TestCase (directly
or indirectly), or add a new file containing the test case class. Preferably
the file should live in the same directory as the existing tests, though you
can specify the directory on the command line. The file must not be named
"all", as that name is reserved for the test-spec.
TBD: To add configuration to the system, first add an entry to config_default
below. If you want this to be a command line parameter, edit config_setup
to add the option and default value to the parser. Then edit config_get
to make sure the option value gets copied into the configuration
structure (which then gets passed to everyone else).
By convention, oft attempts to import the contents of a file by the
name of $platform.py into the local namespace.
IMPORTANT: That file should define a function platform_config_update which
takes a configuration dictionary as an argument and updates it for the
current run. In particular, it should set up config["port_map"] with
the proper map from OF port numbers to OS interface names.
You can add your own platform, say gp104, by adding a file gp104.py to the
platforms directory that defines the function platform_config_update and then
use the parameter --platform=gp104 on the command line. You can also use the
--platform-dir option to change which directory is searched.
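As a sketch, assuming interfaces eth1 through eth4 (illustrative names),
gp104.py might contain:
<pre>
def platform_config_update(config):
    config["port_map"] = {
        1 : "eth1",
        2 : "eth2",
        3 : "eth3",
        4 : "eth4",
    }
</pre>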
The current model for test sets is basic.py.
Default setup:
The default setup runs locally using veth pairs. To exercise this,
check out and build an OpenFlow userspace datapath. Then start it on
the local host:
<pre>
sudo ~/openflow/regress/bin/veth_setup.pl
sudo ofdatapath -i veth0,veth2,veth4,veth6 punix:/tmp/ofd &
sudo ofprotocol unix:/tmp/ofd tcp:127.0.0.1 --fail=closed --max-backoff=1 &
Next, run oft:
sudo ./oft --debug=info
</pre>
Examine oft.log if things don't work.
@todo Support per-component debug levels (esp controller vs dataplane)
@todo Allow specification of priority to override prio check
Current test case setup:
Files with the .py extension in the test directory are considered test files.
The --test-spec command line option selects the tests to run; the test-spec
"all" selects all tests. Note that --test-spec is deprecated in favor of
positional (test|group) arguments.
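For example (test and group names are illustrative):
<pre>
sudo ./oft --list        # show all tests with their groups
sudo ./oft basic         # run every test in the basic module's group
sudo ./oft basic.Echo    # run a single named test
</pre>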
"""
import sys
from optparse import OptionParser
from subprocess import Popen,PIPE
import logging
import unittest
import time
import os
import imp
import random
import signal
import fnmatch
root_dir = os.path.dirname(os.path.realpath(__file__))
pydir = os.path.join(root_dir, 'src', 'python')
if os.path.exists(os.path.join(pydir, 'oftest')):
# Running from source tree
sys.path.insert(0, pydir)
import oftest
from oftest import config
try:
    import oftest.message
except ImportError:
    sys.exit("Missing OpenFlow message classes: please run \"make -C tools/munger\"")
import oftest.dataplane
import oftest.testutils
import oftest.ofutils
try:
    import scapy.all as scapy
except ImportError:
    try:
        import scapy as scapy
    except ImportError:
        sys.exit("Need to install scapy for packet parsing")
##@var profile_mod
# The test profile module, imported by load_profile
profile_mod = None
##@var DEBUG_LEVELS
# Map from strings to debugging levels
DEBUG_LEVELS = {
'debug' : logging.DEBUG,
'verbose' : logging.DEBUG,
'info' : logging.INFO,
'warning' : logging.WARNING,
'warn' : logging.WARNING,
'error' : logging.ERROR,
'critical' : logging.CRITICAL
}
_debug_default = "warning"
_debug_level_default = DEBUG_LEVELS[_debug_default]
##@var config_default
# The default configuration dictionary for OFT
config_default = {
"param" : None,
"platform" : "local",
"platform_args" : None,
"switch_ip" : None, # If not none, actively connect to switch
"controller_host" : "0.0.0.0", # For passive bind
"controller_port" : 6633,
"relax" : False,
"test_spec" : "",
"log_file" : "oft.log",
"log_append" : False,
"list" : False,
"list_test_names" : False,
"debug" : _debug_default,
"dbg_level" : _debug_level_default,
"port_map" : {},
"test_params" : "None",
"profile" : "default",
"allow_user" : False,
"fail_skipped" : False,
"default_timeout" : 2,
"minsize" : 0,
"random_seed" : None,
"test_dir" : os.path.join(root_dir, "tests"),
"platform_dir" : os.path.join(root_dir, "platforms"),
"profile_dir" : os.path.join(root_dir, "profiles"),
"priority" : 0,
}
#@todo Set up a dict of config params so they are easier to manage:
# <param> <cmdline flags> <default value> <help> <optional parser>
# Map options to config structure
def config_get(opts):
"Convert options class to OFT configuration dictionary"
cfg = config_default.copy()
for key in cfg.keys():
cfg[key] = getattr(opts, key)
# Special case checks
    if opts.debug not in DEBUG_LEVELS:
        print "Warning: Bad value specified for debug level; using default"
        # Fix the already-copied value; mutating opts here would have no effect
        cfg["debug"] = _debug_default
if opts.verbose:
cfg["debug"] = "verbose"
cfg["dbg_level"] = DEBUG_LEVELS[cfg["debug"]]
return cfg
def config_setup(cfg_dflt):
"""
Set up the configuration including parsing the arguments
@param cfg_dflt The default configuration dictionary
@return A pair (config, args) where config is an config
object and args is any additional arguments from the command line
"""
usage = "usage: %prog [options] (test|group)..."
description = """
OFTest is a framework and set of tests for validating OpenFlow switches.
The default configuration assumes that an OpenFlow 1.0 switch is attempting to
connect to a controller on the machine running OFTest, port 6633. Additionally,
the interfaces veth1, veth3, veth5, and veth7 should be connected to the switch's
dataplane.
If no positional arguments are given then OFTest will run all tests that
depend only on standard OpenFlow 1.0. Otherwise each positional argument
is interpreted as either a test name or a test group name. The union of
these will be executed. To see what groups each test belongs to use the
--list option.
"""
parser = OptionParser(version="%prog 0.1",
usage=usage,
description=description)
#@todo parse port map as option?
# Set up default values
parser.set_defaults(**cfg_dflt)
#@todo Add options via dictionary
plat_help = """Set the platform type. Valid values include:
local: User space virtual ethernet pair setup
remote: Remote embedded Broadcom based switch
Create a new_plat.py file and use --platform=new_plat on the command line
"""
parser.add_option("-a", "--platform-args", help="Custom arguments per platform.")
parser.add_option("-P", "--platform", help=plat_help)
parser.add_option("-H", "--host", dest="controller_host",
help="The IP/name of the test controller host")
parser.add_option("-S", "--switch-ip", dest="switch_ip",
help="If set, actively connect to this switch by IP")
parser.add_option("-p", "--port", dest="controller_port",
type="int", help="Port number of the test controller")
test_list_help = """Indicate tests to run. Valid entries are "all" (the
default) or a comma separated list of:
group Run all tests in the named group
module.testcase Run the specific test case
"""
parser.add_option("-T", "--test-spec", "--test-list", help=test_list_help)
parser.add_option("--log-file",
help="Name of log file, empty string to log to console")
parser.add_option("--log-append", action="store_true",
help="Do not delete log file if specified")
parser.add_option("--debug",
help="Debug lvl: debug, info, warning, error, critical")
parser.add_option("--list-test-names", action='store_true',
help="List test names matching the test spec and exit", default=False)
parser.add_option("--list", action="store_true",
help="List all tests and exit")
parser.add_option("-v", "--verbose", action="store_true",
help="Short cut for --debug=verbose")
parser.add_option("--relax", action="store_true",
help="Relax packet match checks allowing other packets")
parser.add_option("--profile",
help="File listing tests to skip/run")
parser.add_option("-t", "--test-params",
help="""Set test parameters: key=val;...
NOTE: key MUST be a valid Python identifier, egr_count not egr-count
See --list""")
parser.add_option("--allow-user", action="store_true",
help="Proceed even if oftest is not run as root")
parser.add_option("--fail-skipped", action="store_true",
help="Return failure if any test was skipped")
parser.add_option("--default-timeout", type="int",
help="Timeout in seconds for most operations")
parser.add_option("--minsize", type="int",
help="Minimum allowable packet size on the dataplane.",
default=0)
parser.add_option("--random-seed", type="int",
help="Random number generator seed",
default=None)
parser.add_option("--test-dir", type="string",
help="Directory containing tests")
parser.add_option("--platform-dir", type="string",
help="Directory containing platform modules")
parser.add_option("--profile-dir", type="string",
help="Directory containing profile modules")
# Might need this if other parsers want command line
# parser.allow_interspersed_args = False
(options, args) = parser.parse_args()
config = config_get(options)
return (config, args)
def load_profile(config):
"""
Import a profile from the profiles library
"""
global profile_mod
logging.info("Importing profile: %s" % config["profile"])
    try:
        profile_mod = imp.load_module(config["profile"],
                                      *imp.find_module(config["profile"],
                                                       [config["profile_dir"]]))
    except Exception:
        logging.info("Could not import profile: %s.py" % config["profile"])
        print "Failed to import profile: %s" % config["profile"]
        raise
    # Check outside the try block so die()'s SystemExit is not swallowed
    if "skip_test_list" not in dir(profile_mod):
        die("Profile did not define skip_test_list")
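# A profile module lives in config["profile_dir"] and must define
# skip_test_list. A minimal sketch (the test name is illustrative):
#
#   # profiles/myprofile.py
#   skip_test_list = ["basic.SomeSlowTest"]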
def logging_setup(config):
"""
Set up logging based on config
"""
_format = "%(asctime)s.%(msecs)d %(name)-10s: %(levelname)-8s: %(message)s"
_datefmt = "%H:%M:%S"
_mode = config["log_append"] and "a" or "w"
logging.basicConfig(filename=config["log_file"],
filemode=_mode,
level=config["dbg_level"],
format=_format, datefmt=_datefmt)
def load_test_modules(config):
"""
Load tests from the test_dir directory.
Test cases are subclasses of unittest.TestCase
Also updates the _groups member to include "standard" and
module test groups if appropriate.
@param config The oft configuration dictionary
@returns A dictionary from test module names to tuples of
(module, dictionary from test names to test classes).
"""
result = {}
for root, dirs, filenames in os.walk(config["test_dir"]):
# Iterate over each python file
for filename in fnmatch.filter(filenames, '[!.]*.py'):
modname = os.path.splitext(os.path.basename(filename))[0]
            try:
                if modname in sys.modules:
                    mod = sys.modules[modname]
                else:
                    mod = imp.load_module(modname, *imp.find_module(modname, [root]))
            except Exception:
                logging.warning("Could not import file " + filename)
                raise
# Find all testcases defined in the module
            tests = dict((k, v) for (k, v) in mod.__dict__.items()
                         if isinstance(v, type) and issubclass(v, unittest.TestCase))
if tests:
for (testname, test) in tests.items():
# Set default annotation values
if not hasattr(test, "_groups"):
test._groups = []
if not hasattr(test, "_nonstandard"):
test._nonstandard = False
if not hasattr(test, "_disabled"):
test._disabled = False
# Put test in its module's test group
if not test._disabled:
test._groups.append(modname)
# Put test in the standard test group
if not test._disabled and not test._nonstandard:
test._groups.append("standard")
test._groups.append("all") # backwards compatibility
result[modname] = (mod, tests)
return result
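# For reference, a test class can pre-set the annotations consumed above;
# a hypothetical sketch (load_test_modules supplies the defaults otherwise):
#
#   class VendorTest(unittest.TestCase):
#       _nonstandard = True   # requires extensions; not in "standard"
#       _disabled = False     # still runs when selected explicitly
#       _groups = ["vendor"]  # extra groups beyond the module's own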
def prune_tests(test_specs, test_modules):
"""
Return tests matching the given test-specs
@param test_specs A list of group names or test names.
@param test_modules Same format as the output of load_test_modules.
@returns Same format as the output of load_test_modules.
"""
result = {}
for e in test_specs:
matched = False
for (modname, (mod, tests)) in test_modules.items():
for (testname, test) in tests.items():
if e in test._groups or e == "%s.%s" % (modname, testname):
result.setdefault(modname, (mod, {}))
result[modname][1][testname] = test
matched = True
if not matched:
die("test-spec element %s did not match any tests" % e)
return result
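# For example, prune_tests(["standard", "basic.Echo"], test_modules) keeps
# every test in the "standard" group plus the single named test (names
# illustrative).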
def die(msg, exit_val=1):
print msg
logging.critical(msg)
sys.exit(exit_val)
def _space_to(n, s):
    """
    Generate a string of spaces to pad string s out to width n
    If the length of s >= n, return a single space
    """
    spaces = n - len(s)
    if spaces > 0:
        return " " * spaces
    return " "
#
# Main script
#
# Setup global configuration
(new_config, args) = config_setup(config_default)
oftest.config.update(new_config)
logging_setup(config)
logging.info("++++++++ " + time.asctime() + " ++++++++")
# Allow tests to import each other
sys.path.append(config["test_dir"])
test_specs = args
if config["test_spec"] != "":
print >> sys.stderr, "WARNING: The --test-spec option is deprecated"
test_specs += config["test_spec"].split(',')
if test_specs == []:
test_specs = ["standard"]
test_modules = load_test_modules(config)
load_profile(config)
# Check if test list is requested; display and exit if so
if config["list"]:
mod_count = 0
test_count = 0
print """\
Tests are shown grouped by module. If a test is in any groups beyond "standard"
and its module's group then they are shown in parentheses."""
print
print """\
Tests marked with '*' are non-standard and may require vendor extensions or
special switch configuration. These are not part of the "standard" test group."""
print
print """\
Tests marked with '!' are disabled because they are experimental, special-purpose,
or are too long to be run normally. These are not part of the "standard" test
group or their module's test group."""
print
print "Tests marked (TP1) after name take --test-params including:"
print " 'vid=N;strip_vlan=bool;add_vlan=bool'"
print "Note that --profile may override which tests are run"
print
print "Test List:"
for (modname, (mod, tests)) in test_modules.items():
mod_count += 1
desc = (mod.__doc__ or "").strip()
desc = desc.split('\n')[0]
start_str = " Module " + mod.__name__ + ": "
print start_str + _space_to(22, start_str) + desc
for (testname, test) in tests.items():
try:
desc = (test.__doc__ or "").strip()
desc = desc.split('\n')[0]
            except Exception:
desc = "No description"
groups = set(test._groups) - set(["all", "standard", modname])
if groups:
desc = "(%s) %s" % (",".join(groups), desc)
start_str = " %s%s %s:" % (test._nonstandard and "*" or " ",
test._disabled and "!" or " ",
testname)
if len(start_str) > 22:
desc = "\n" + _space_to(22, "") + desc
print start_str + _space_to(22, start_str) + desc
test_count += 1
print
print "%d modules shown with a total of %d tests" % \
(mod_count, test_count)
sys.exit(0)
test_modules = prune_tests(test_specs, test_modules)
# Check if test list is requested; display and exit if so
if config["list_test_names"]:
for (modname, (mod, tests)) in test_modules.items():
for (testname, test) in tests.items():
print "%s.%s" % (modname, testname)
sys.exit(0)
# Generate the test suite
#@todo Decide if multiple suites are ever needed
suite = unittest.TestSuite()
for (modname, (mod, tests)) in test_modules.items():
for (testname, test) in tests.items():
logging.info("Adding test " + modname + "." + testname)
suite.addTest(test())
# Allow platforms to import each other
sys.path.append(config["platform_dir"])
# Load the platform module
platform_name = config["platform"]
logging.info("Importing platform: " + platform_name)
platform_mod = None
try:
    platform_mod = imp.load_module(platform_name,
                                   *imp.find_module(platform_name,
                                                    [config["platform_dir"]]))
except Exception:
    logging.warn("Failed to import " + platform_name + " platform module")
    raise
try:
platform_mod.platform_config_update(config)
except Exception:
logging.warn("Could not run platform host configuration")
raise
if not config["port_map"]:
die("Interface port map was not defined by the platform. Exiting.")
logging.debug("Configuration: " + str(config))
logging.info("OF port map: " + str(config["port_map"]))
if config["dbg_level"] == logging.CRITICAL:
_verb = 0
elif config["dbg_level"] >= logging.WARNING:
_verb = 1
else:
_verb = 2
oftest.ofutils.default_timeout = config["default_timeout"]
oftest.testutils.MINSIZE = config['minsize']
if os.getuid() != 0 and not config["allow_user"]:
print "ERROR: Super-user privileges required. Please re-run with " \
"sudo or as root."
sys.exit(1)
if config["random_seed"] is not None:
logging.info("Random seed: %d" % config["random_seed"])
random.seed(config["random_seed"])
# Remove Python's default SIGINT handler, which raises KeyboardInterrupt.
# Exiting via an exception waits for all threads to terminate, which might
# never happen.
signal.signal(signal.SIGINT, signal.SIG_DFL)
if __name__ == "__main__":
# Set up the dataplane
oftest.dataplane_instance = oftest.dataplane.DataPlane(config)
for of_port, ifname in config["port_map"].items():
oftest.dataplane_instance.port_add(ifname, of_port)
logging.info("*** TEST RUN START: " + time.asctime())
result = unittest.TextTestRunner(verbosity=_verb).run(suite)
if oftest.testutils.skipped_test_count > 0:
ts = " tests"
if oftest.testutils.skipped_test_count == 1: ts = " test"
logging.info("Skipped " + str(oftest.testutils.skipped_test_count) + ts)
print("Skipped " + str(oftest.testutils.skipped_test_count) + ts)
logging.info("*** TEST RUN END : " + time.asctime())
# Shutdown the dataplane
oftest.dataplane_instance.kill()
oftest.dataplane_instance = None
if result.failures or result.errors:
# exit(1) hangs sometimes
os._exit(1)
if oftest.testutils.skipped_test_count > 0 and config["fail_skipped"]:
os._exit(1)