| #!/usr/bin/env python |
| """ |
| @package oft |
| |
| OpenFlow test framework top level script |
| |
| This script is the entry point for running OpenFlow tests |
| using the OFT framework. |
| |
| The global configuration is passed around in a dictionary |
| generally called config. The keys have the following |
| significance. |
| |
| <pre> |
| platform : String identifying the target platform |
| controller_host : Host on which test controller is running (for sockets) |
| controller_port : Port on which test controller listens for switch connections
| test_dir : (TBD) Directory to search for test files (default .) |
| test_spec : (TBD) Specification of test(s) to run |
| log_file : Filename for test logging |
| list : Boolean: List all tests and exit |
| debug : String giving debug level (info, warning, error...) |
| </pre> |
| |
| See config_default below for the default values.
| |
| The following are stored in the config dictionary, but are not currently |
| configurable through the command line. |
| |
| <pre> |
| dbg_level : logging module value of debug level |
| port_map : Map of dataplane OpenFlow port to OS interface names |
| </pre> |
| |
| Each test may be assigned a priority by setting the "priority" property |
| in the class definition. For now, the only use of this is to avoid |
| automatic inclusion of tests in the default list. This is done by
| setting the priority value to less than 0. Eventually we may add ordering
| of test execution by test priority. |
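|
| For example, a test can opt out of the default run as follows (the class
| name is illustrative):
| <pre>
| class MyExperimentalTest(unittest.TestCase):
|     # A negative priority keeps this test out of the default run
|     priority = -1
| </pre>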
| |
| To add a test to the system, either: edit an existing test case file (like |
| basic.py) to add a test class which inherits from unittest.TestCase (directly |
| or indirectly); or add a new file with the test case class. Preferably the |
| file is in the same directory as existing tests, though you can specify the |
| directory on the command line. The file should not be called "all" as that's |
| reserved for the test-spec. |
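|
| For example, a minimal test case file might look like the following (module
| and class names are illustrative; real tests typically build on the base
| classes and helpers used by basic.py):
| <pre>
| # mytests.py -- placed in the test directory
| import logging
| import unittest
|
| class MyEcho(unittest.TestCase):
|     '''One-line description shown by --list'''
|     def runTest(self):
|         logging.info("Running MyEcho")
|         # A real test would exchange OpenFlow messages here
| </pre>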
| |
| TBD: To add configuration to the system, first add an entry to config_default |
| below. If you want this to be a command line parameter, edit config_setup |
| to add the option and default value to the parser. Then edit config_get |
| to make sure the option value gets copied into the configuration |
| structure (which then gets passed to everyone else). |
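|
| As a sketch (the option name is illustrative), adding a simple boolean
| option would look roughly like:
| <pre>
| # In config_default:
| "my_option" : False,
|
| # In config_setup():
| parser.add_option("--my-option", action="store_true",
|                   help="Illustrative example option")
|
| # config_get copies each config_default key from the parsed options,
| # so simple options need no further change there.
| </pre>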
| |
| By convention, oft attempts to import the contents of a file by the |
| name of $platform.py into the local namespace. |
| |
| IMPORTANT: That file should define a function platform_config_update which |
| takes a configuration dictionary as an argument and updates it for the |
| current run. In particular, it should set up config["port_map"] with |
| the proper map from OF port numbers to OF interface names. |
| |
| You can add your own platform, say gp104, by adding a file gp104.py to the |
| platforms directory that defines the function platform_config_update and then |
| use the parameter --platform=gp104 on the command line. You can also use the |
| --platform-dir option to change which directory is searched. |
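|
| For example, a minimal platform module might look like this (interface
| names are illustrative):
| <pre>
| # platforms/gp104.py
| def platform_config_update(config):
|     # Map OpenFlow port numbers to local interface names
|     config["port_map"] = {
|         1 : "eth1",
|         2 : "eth2",
|         3 : "eth3",
|         4 : "eth4",
|     }
| </pre>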
| |
| The current model for test sets is basic.py. |
| |
| Default setup: |
| |
| The default setup runs locally using veth pairs. To exercise this, |
| check out and build an OpenFlow userspace datapath. Then start it on
| the local host: |
| <pre> |
| sudo ~/openflow/regress/bin/veth_setup.pl |
| sudo ofdatapath -i veth0,veth2,veth4,veth6 punix:/tmp/ofd & |
| sudo ofprotocol unix:/tmp/ofd tcp:127.0.0.1 --fail=closed --max-backoff=1 & |
| |
| Next, run oft: |
| sudo ./oft --debug=info |
| </pre> |
| |
| Examine oft.log if things don't work. |
| |
| @todo Support per-component debug levels (esp controller vs dataplane) |
| @todo Allow specification of priority to override prio check |
| |
| Current test case setup: |
| Files with the .py extension in the test directory are considered test files.
| The --test-spec command line option selects the tests to run; the special
| test-spec "all" (the default) selects all tests.
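|
| For example (Echo is the echo request test defined in basic.py):
| <pre>
| sudo ./oft --test-spec=all          # every discovered test (the default)
| sudo ./oft --test-spec=basic        # all tests in the basic module
| sudo ./oft --test-spec=basic.Echo   # a single test case
| </pre>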
| """ |
| |
| import sys |
| from optparse import OptionParser |
| from subprocess import Popen,PIPE |
| import logging |
| import unittest |
| import time |
| import os |
| import imp |
| import random |
| import signal |
| import fnmatch |
| |
| root_dir = os.path.dirname(os.path.realpath(__file__)) |
| |
| pydir = os.path.join(root_dir, 'src', 'python') |
| if os.path.exists(os.path.join(pydir, 'oftest')): |
| # Running from source tree |
| sys.path.insert(0, pydir) |
| |
| import oftest |
| from oftest import config |
| |
| try: |
| import oftest.message |
| except ImportError:
| sys.exit("Missing OpenFlow message classes: please run \"make -C tools/munger\"") |
| |
| import oftest.dataplane  # used below to create the DataPlane instance
| import oftest.testutils
| import oftest.ofutils
| |
| try: |
| import scapy.all as scapy |
| except ImportError:
| try: |
| import scapy as scapy |
| except ImportError:
| sys.exit("Need to install scapy for packet parsing") |
| |
| ##@var profile_mod
| # The profile module currently loaded (if any)
| profile_mod = None |
| |
| ##@var DEBUG_LEVELS |
| # Map from strings to debugging levels |
| DEBUG_LEVELS = { |
| 'debug' : logging.DEBUG, |
| 'verbose' : logging.DEBUG, |
| 'info' : logging.INFO, |
| 'warning' : logging.WARNING, |
| 'warn' : logging.WARNING, |
| 'error' : logging.ERROR, |
| 'critical' : logging.CRITICAL |
| } |
| |
| _debug_default = "warning" |
| _debug_level_default = DEBUG_LEVELS[_debug_default] |
| |
| ##@var config_default |
| # The default configuration dictionary for OFT |
| config_default = { |
| "param" : None, |
| "platform" : "local", |
| "platform_args" : None, |
| "switch_ip" : None, # If not none, actively connect to switch |
| "controller_host" : "0.0.0.0", # For passive bind |
| "controller_port" : 6633, |
| "relax" : False, |
| "test_spec" : "all", |
| "log_file" : "oft.log", |
| "log_append" : False, |
| "list" : False, |
| "list_test_names" : False, |
| "debug" : _debug_default, |
| "dbg_level" : _debug_level_default, |
| "port_map" : {}, |
| "test_params" : "None", |
| "profile" : "default", |
| "allow_user" : False, |
| "fail_skipped" : False, |
| "default_timeout" : 2, |
| "minsize" : 0, |
| "random_seed" : None, |
| "test_dir" : os.path.join(root_dir, "tests"), |
| "platform_dir" : os.path.join(root_dir, "platforms"), |
| "profile_dir" : os.path.join(root_dir, "profiles"), |
| "priority" : 0, |
| } |
| |
| # Default test priority |
| TEST_PRIO_DEFAULT=100 |
| TEST_PRIO_SKIP=-1 |
| |
| #@todo Set up a dict of config params so easier to manage: |
| # <param> <cmdline flags> <default value> <help> <optional parser> |
| |
| # Map options to config structure |
| def config_get(opts): |
| "Convert options class to OFT configuration dictionary" |
| cfg = config_default.copy() |
| for key in cfg.keys(): |
| cfg[key] = getattr(opts, key) |
| |
| # Special case checks
| if opts.debug not in DEBUG_LEVELS:
| print "Warning: Bad value specified for debug level; using default"
| cfg["debug"] = _debug_default
| if opts.verbose:
| cfg["debug"] = "verbose"
| cfg["dbg_level"] = DEBUG_LEVELS[cfg["debug"]]
| |
| return cfg |
| |
| def config_setup(cfg_dflt): |
| """ |
| Set up the configuration including parsing the arguments |
| |
| @param cfg_dflt The default configuration dictionary |
| @return A pair (config, args) where config is an config |
| object and args is any additional arguments from the command line |
| """ |
| |
| parser = OptionParser(version="%prog 0.1") |
| |
| #@todo parse port map as option? |
| # Set up default values |
| parser.set_defaults(**cfg_dflt) |
| |
| #@todo Add options via dictionary |
| plat_help = """Set the platform type. Valid values include: |
| local: User space virtual ethernet pair setup |
| remote: Remote embedded Broadcom based switch |
| Create a new_plat.py file and use --platform=new_plat on the command line |
| """ |
| parser.add_option("-a", "--platform-args", help="Custom arguments per platform.") |
| parser.add_option("-P", "--platform", help=plat_help) |
| parser.add_option("-H", "--host", dest="controller_host", |
| help="The IP/name of the test controller host") |
| parser.add_option("-S", "--switch-ip", dest="switch_ip", |
| help="If set, actively connect to this switch by IP") |
| parser.add_option("-p", "--port", dest="controller_port", |
| type="int", help="Port number of the test controller") |
| test_list_help = """Indicate tests to run. Valid entries are "all" (the |
| default) or a comma separated list of: |
| module Run all tests in the named module |
| testcase Run tests in all modules with the name testcase |
| module.testcase Run the specific test case |
| """ |
| parser.add_option("-T", "--test-spec", "--test-list", help=test_list_help) |
| parser.add_option("--log-file", |
| help="Name of log file, empty string to log to console") |
| parser.add_option("--log-append", action="store_true", |
| help="Do not delete log file if specified") |
| parser.add_option("--debug", |
| help="Debug lvl: debug, info, warning, error, critical") |
| parser.add_option("--port-count", type="int", |
| help="Number of ports to use (optional)") |
| parser.add_option("--base-of-port", type="int", |
| help="Base OpenFlow port number (optional)") |
| parser.add_option("--base-if-index", type="int", |
| help="Base interface index number (optional)") |
| parser.add_option("--list-test-names", action='store_true', |
| help="List only test names.", default=False) |
| parser.add_option("--list", action="store_true", |
| help="List all tests and exit") |
| parser.add_option("-v", "--verbose", action="store_true", |
| help="Short cut for --debug=verbose") |
| parser.add_option("--relax", action="store_true", |
| help="Relax packet match checks allowing other packets") |
| parser.add_option("--param", type="int", |
| help="Parameter sent to test (for debugging)") |
| parser.add_option("--profile", |
| help="File listing tests to skip/run") |
| parser.add_option("-t", "--test-params", |
| help="""Set test parameters: key=val;... |
| NOTE: key MUST be a valid Python identifier, egr_count not egr-count |
| See --list""") |
| parser.add_option("--allow-user", action="store_true", |
| help="Proceed even if oftest is not run as root") |
| parser.add_option("--fail-skipped", action="store_true", |
| help="Return failure if any test was skipped") |
| parser.add_option("--default-timeout", type="int", |
| help="Timeout in seconds for most operations") |
| parser.add_option("--minsize", type="int", |
| help="Minimum allowable packet size on the dataplane.", |
| default=0) |
| parser.add_option("--random-seed", type="int", |
| help="Random number generator seed", |
| default=None) |
| parser.add_option("--test-dir", type="string", |
| help="Directory containing tests") |
| parser.add_option("--platform-dir", type="string", |
| help="Directory containing platform modules") |
| parser.add_option("--profile-dir", type="string", |
| help="Directory containing profile modules") |
| parser.add_option("--priority", type="int", |
| help="Minimum test priority", |
| default=0) |
| |
| # Might need this if other parsers want command line |
| # parser.allow_interspersed_args = False |
| (options, args) = parser.parse_args() |
| |
| config = config_get(options) |
| |
| return (config, args) |
| |
| def load_profile(config): |
| """ |
| Import a profile from the profiles library |
| """ |
| |
| global profile_mod |
| logging.info("Importing profile: %s" % config["profile"]) |
| try: |
| profile_mod = imp.load_module(config["profile"], *imp.find_module(config["profile"], [config["profile_dir"]])) |
| if not hasattr(profile_mod, "skip_test_list"):
| die("Profile did not define skip_test_list") |
| except: |
| logging.info("Could not import profile: %s.py" % config["profile"]) |
| print "Failed to import profile: %s" % config["profile"] |
| raise |
| |
| def logging_setup(config): |
| """ |
| Set up logging based on config |
| """ |
| _format = "%(asctime)s.%(msecs)d %(name)-10s: %(levelname)-8s: %(message)s" |
| _datefmt = "%H:%M:%S" |
| _mode = config["log_append"] and "a" or "w" |
| logging.basicConfig(filename=config["log_file"], |
| filemode=_mode, |
| level=config["dbg_level"], |
| format=_format, datefmt=_datefmt) |
| |
| def load_test_modules(config): |
| """ |
| Load tests from the test_dir directory. |
| |
| Test cases are subclasses of unittest.TestCase |
| |
| @param config The oft configuration dictionary |
| @returns A dictionary from test module names to tuples of |
| (module, dictionary from test names to test classes). |
| """ |
| |
| result = {} |
| |
| for root, dirs, filenames in os.walk(config["test_dir"]): |
| # Iterate over each python file |
| for filename in fnmatch.filter(filenames, '[!.]*.py'): |
| modname = os.path.splitext(os.path.basename(filename))[0] |
| |
| try: |
| if modname in sys.modules:
| mod = sys.modules[modname] |
| else: |
| mod = imp.load_module(modname, *imp.find_module(modname, [root])) |
| except: |
| logging.warning("Could not import file " + filename) |
| raise |
| |
| # Find all testcases defined in the module |
| tests = dict((k, v) for (k, v) in mod.__dict__.items() if type(v) == type and |
| issubclass(v, unittest.TestCase)) |
| if tests: |
| result[modname] = (mod, tests) |
| |
| return result |
| |
| def prune_tests(test_spec, test_modules): |
| """ |
| Return tests matching a given test-spec. |
| @param test_spec A test-spec string. |
| @param test_modules Same format as the output of load_test_modules. |
| @returns Same format as the output of load_test_modules. |
| """ |
| result = {} |
| for (spec_modname, spec_testname) in parse_test_spec(test_spec): |
| matched = False |
| for (modname, (mod, tests)) in test_modules.items(): |
| if (spec_modname == None or spec_modname == modname): |
| for (testname, test) in tests.items(): |
| if (spec_testname == None or spec_testname == testname): |
| result.setdefault(modname, (mod, {})) |
| result[modname][1][testname] = test |
| matched = True |
| if not matched: |
| if spec_modname and spec_testname: |
| el = "%s.%s" % (spec_modname, spec_testname) |
| else: |
| el = spec_modname or spec_testname or "all" |
| die("test-spec element %s did not match any tests" % el) |
| return result |
| |
| def parse_test_spec(test_spec): |
| """ |
| The input string is split on commas and each element is parsed |
| individually into a module name and test name. Either may be None |
| for a wildcard. The case of the first letter resolves ambiguity |
| of whether a word is a test or module name. The special string |
| "all" results in both fields wildcarded. |
| |
| Examples: |
| basic.Echo -> ("basic", "Echo") |
| basic -> ("basic", None) |
| Echo -> (None, "Echo") |
| all -> (None, None) |
| """ |
| results = [] |
| for ts_entry in test_spec.split(","): |
| parts = ts_entry.split(".") |
| if len(parts) == 1: |
| if ts_entry == "all": |
| results.append((None, None)) |
| elif ts_entry[0].isupper(): |
| results.append((None, ts_entry)) |
| else: |
| results.append((ts_entry, None)) |
| elif len(parts) == 2: |
| results.append((parts[0], parts[1])) |
| else: |
| die("Bad test spec: " + ts_entry) |
| return results |
| |
| def die(msg, exit_val=1): |
| print msg |
| logging.critical(msg) |
| sys.exit(exit_val) |
| |
| def _space_to(n, s):
| """
| Generate a string of spaces to achieve width n given string s
| If length of s >= n, return one space
| """
| spaces = n - len(s)
| if spaces > 0:
| return " " * spaces
| return " "
| |
| def test_prio_get(test): |
| """ |
| Return the priority of a test |
| |
| If the test is in the profile's skip list, return the skip value.
| If it is in the profile's run list, return the default priority.
|
| Otherwise, if the priority property is set in the class, return
| that value; else return the default (100).
| """ |
| if test.__name__ in profile_mod.skip_test_list: |
| logging.info("Skipping test %s due to profile" % test.__name__) |
| return TEST_PRIO_SKIP |
| if test.__name__ in getattr(profile_mod, "run_test_list", []):
| logging.info("Add test %s due to profile" % test.__name__) |
| return TEST_PRIO_DEFAULT |
| return getattr(test, "priority", TEST_PRIO_DEFAULT) |
| |
| # |
| # Main script |
| # |
| |
| # Setup global configuration |
| (new_config, args) = config_setup(config_default) |
| oftest.config.update(new_config) |
| |
| logging_setup(config) |
| logging.info("++++++++ " + time.asctime() + " ++++++++") |
| |
| # Allow tests to import each other |
| sys.path.append(config["test_dir"]) |
| |
| test_modules = prune_tests(config["test_spec"], load_test_modules(config)) |
| |
| load_profile(config) |
| |
| # Check if test list is requested; display and exit if so |
| if config["list"]: |
| mod_count = 0 |
| test_count = 0 |
| print "\nTest List:" |
| for (modname, (mod, tests)) in test_modules.items(): |
| mod_count += 1 |
| desc = (mod.__doc__ or "").strip() |
| desc = desc.split('\n')[0] |
| start_str = " Module " + mod.__name__ + ": " |
| print start_str + _space_to(22, start_str) + desc |
| for (testname, test) in tests.items(): |
| try: |
| desc = (test.__doc__ or "").strip() |
| desc = desc.split('\n')[0] |
| except: |
| desc = "No description" |
| if test_prio_get(test) < config["priority"]: |
| start_str = " * " + testname + ":" |
| else: |
| start_str = " " + testname + ":" |
| if len(start_str) > 22: |
| desc = "\n" + _space_to(22, "") + desc |
| print start_str + _space_to(22, start_str) + desc |
| test_count += 1 |
| print |
| print "%d modules shown with a total of %d tests" % \ |
| (mod_count, test_count) |
| print |
| print "Tests preceded by * are not run by default" |
| print "Tests marked (TP1) after name take --test-params including:" |
| print " 'vid=N;strip_vlan=bool;add_vlan=bool'" |
| print "Note that --profile may override which tests are run" |
| sys.exit(0) |
| |
| # Check if a list of test names is requested; display and exit if so
| if config["list_test_names"]: |
| for (modname, (mod, tests)) in test_modules.items(): |
| for (testname, test) in tests.items(): |
| if test_prio_get(test) >= config["priority"]: |
| print "%s.%s" % (modname, testname) |
| sys.exit(0) |
| |
| # Generate the test suite |
| #@todo Decide if multiple suites are ever needed |
| suite = unittest.TestSuite() |
| |
| for (modname, (mod, tests)) in test_modules.items(): |
| for (testname, test) in tests.items(): |
| if test_prio_get(test) >= config["priority"]: |
| logging.info("Adding test " + modname + "." + testname) |
| suite.addTest(test()) |
| |
| # Allow platforms to import each other |
| sys.path.append(config["platform_dir"]) |
| |
| # Load the platform module |
| platform_name = config["platform"] |
| logging.info("Importing platform: " + platform_name) |
| platform_mod = None |
| try: |
| platform_mod = imp.load_module(platform_name, *imp.find_module(platform_name, [config["platform_dir"]])) |
| except: |
| logging.warn("Failed to import " + platform_name + " platform module") |
| raise |
| |
| try: |
| platform_mod.platform_config_update(config) |
| except: |
| logging.warn("Could not run platform host configuration") |
| raise |
| |
| if not config["port_map"]: |
| die("Interface port map was not defined by the platform. Exiting.") |
| |
| logging.debug("Configuration: " + str(config)) |
| logging.info("OF port map: " + str(config["port_map"])) |
| |
| if config["dbg_level"] == logging.CRITICAL: |
| _verb = 0 |
| elif config["dbg_level"] >= logging.WARNING: |
| _verb = 1 |
| else: |
| _verb = 2 |
| |
| oftest.ofutils.default_timeout = config["default_timeout"] |
| oftest.testutils.MINSIZE = config['minsize'] |
| |
| if os.getuid() != 0 and not config["allow_user"]: |
| print "ERROR: Super-user privileges required. Please re-run with " \ |
| "sudo or as root." |
| sys.exit(1) |
| |
| if config["random_seed"] is not None: |
| logging.info("Random seed: %d" % config["random_seed"]) |
| random.seed(config["random_seed"]) |
| |
| # Remove python's signal handler which raises KeyboardInterrupt. Exiting from
| # an exception waits for all threads to terminate, which might never happen.
| signal.signal(signal.SIGINT, signal.SIG_DFL) |
| |
| if __name__ == "__main__": |
| # Set up the dataplane |
| oftest.dataplane_instance = oftest.dataplane.DataPlane(config) |
| for of_port, ifname in config["port_map"].items(): |
| oftest.dataplane_instance.port_add(ifname, of_port) |
| |
| logging.info("*** TEST RUN START: " + time.asctime()) |
| result = unittest.TextTestRunner(verbosity=_verb).run(suite) |
| if oftest.testutils.skipped_test_count > 0: |
| ts = " tests" |
| if oftest.testutils.skipped_test_count == 1: ts = " test" |
| logging.info("Skipped " + str(oftest.testutils.skipped_test_count) + ts) |
| print("Skipped " + str(oftest.testutils.skipped_test_count) + ts) |
| logging.info("*** TEST RUN END : " + time.asctime()) |
| |
| # Shutdown the dataplane |
| oftest.dataplane_instance.kill() |
| oftest.dataplane_instance = None |
| |
| if result.failures or result.errors: |
| # exit(1) hangs sometimes |
| os._exit(1) |
| if oftest.testutils.skipped_test_count > 0 and config["fail_skipped"]: |
| os._exit(1) |