Initial oftest skeleton with wrapper generators and pylibopenflow
diff --git a/doc/Doxyfile b/doc/Doxyfile
new file mode 100644
index 0000000..3def26f
--- /dev/null
+++ b/doc/Doxyfile
@@ -0,0 +1,1418 @@
+# Doxyfile 1.5.6
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME =
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY =
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Farsi, Finnish, French, German, Greek,
+# Hungarian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, Polish,
+# Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish,
+# and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member
+# documentation.
+
+DETAILS_AT_TOP = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = YES
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match function declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); vs.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen replace the get and set methods with a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page. This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = "../src/controller" "../src/dataplane" \
+ "../src/ofmsg" "../src/packet"
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90
+
+FILE_PATTERNS = "*.py"
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER = "python /usr/bin/doxypy.py"
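+
+# Note: doxypy rewrites Python docstrings into Doxygen comment blocks, so
+# Doxygen commands can be written directly inside docstrings.  A minimal
+# sketch of the docstring style this setup is assumed to expect (the
+# function below is hypothetical and not part of oftest):
+#
+#   def parse_action(buf):
+#       """Parse a single action from packed bytes.
+#       @param buf  Packed bytes beginning at the action header
+#       @return     An action object, or None on error
+#       """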
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code. Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
+# Netscape 6.0+, Internet Explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to FRAME, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are
+# probably better off using the HTML help feature. Other possible values
+# for this tag are: HIERARCHIES, which will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list;
+# ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which
+# disables this behavior completely. For backwards compatibility with previous
+# releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE
+# respectively.
+
+GENERATE_TREEVIEW = NONE
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# By default doxygen will write a font called FreeSans.ttf to the output
+# directory and reference it in all dot files that doxygen generates. This
+# font does not include all possible unicode characters however, so when you need
+# these (or just want a differently looking font) you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lay further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is enabled by default, which results in a transparent
+# background. Warning: Depending on the platform used, enabling this option
+# may lead to badly anti-aliased labels on the edges of a graph (i.e. they
+# become hard to read).
+
+DOT_TRANSPARENT = YES
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE = NO
diff --git a/src/python/oftest/ofmsg/action_list.py b/src/python/oftest/ofmsg/action_list.py
new file mode 100644
index 0000000..83dc850
--- /dev/null
+++ b/src/python/oftest/ofmsg/action_list.py
@@ -0,0 +1,140 @@
+"""
+OpenFlow actions list class
+"""
+
+from action import *
+from ofp import ofp_header
+
+# # Map OFP action identifiers to the actual structures used on the wire
+# action_object_map = {
+# OFPAT_OUTPUT : ofp_action_output,
+# OFPAT_SET_VLAN_VID : ofp_action_vlan_vid,
+# OFPAT_SET_VLAN_PCP : ofp_action_vlan_pcp,
+# OFPAT_STRIP_VLAN : ofp_action_header,
+# OFPAT_SET_DL_SRC : ofp_action_dl_addr,
+# OFPAT_SET_DL_DST : ofp_action_dl_addr,
+# OFPAT_SET_NW_SRC : ofp_action_nw_addr,
+# OFPAT_SET_NW_DST : ofp_action_nw_addr,
+# OFPAT_SET_NW_TOS : ofp_action_nw_tos,
+# OFPAT_SET_TP_SRC : ofp_action_tp_port,
+# OFPAT_SET_TP_DST : ofp_action_tp_port,
+# OFPAT_ENQUEUE : ofp_action_enqueue
+# }
+
+# For debugging
+action_object_map = {
+ OFPAT_OUTPUT : action_output,
+ OFPAT_SET_VLAN_VID : action_set_vlan_vid,
+ OFPAT_SET_VLAN_PCP : action_set_vlan_pcp,
+ OFPAT_STRIP_VLAN : action_strip_vlan,
+ OFPAT_SET_DL_SRC : action_set_dl_src,
+ OFPAT_SET_DL_DST : action_set_dl_dst,
+ OFPAT_SET_NW_SRC : action_set_nw_src,
+ OFPAT_SET_NW_DST : action_set_nw_dst,
+ OFPAT_SET_NW_TOS : action_set_nw_tos,
+ OFPAT_SET_TP_SRC : action_set_tp_src,
+ OFPAT_SET_TP_DST : action_set_tp_dst,
+ OFPAT_ENQUEUE : action_enqueue
+}
+
+class action_list(object):
+ """
+ Maintain a list of actions
+
+ Data members:
+ @arg actions: An array of action objects such as action_output, etc.
+
+ Methods:
+ @arg pack: Pack the structure into a string
+ @arg unpack: Unpack a string to objects, with proper typing
+ @arg add: Add an action to the list; you can directly access
+ the action member, but add will validate that the added object
+ is an action.
+
+ """
+
+ def __init__(self):
+ self.actions = []
+
+ def pack(self):
+ """
+ Pack a list of actions
+
+ Returns the packed string
+ """
+
+ packed = ""
+ for act in self.actions:
+ packed += act.pack()
+ return packed
+
+ def unpack(self, binary_string, bytes=None):
+ """
+ Unpack a list of actions
+
+ Unpack actions from a binary string, creating an array
+ of objects of the appropriate type
+
+ @param binary_string The string to be unpacked
+
+        @param bytes The total length of the action list in bytes.
+        If None, the list is assumed to extend through the entire string.
+
+ @return The remainder of binary_string that was not parsed
+
+ """
+ if bytes == None:
+ bytes = len(binary_string)
+ bytes_done = 0
+ count = 0
+ cur_string = binary_string
+ while bytes_done < bytes:
+ hdr = ofp_action_header()
+ hdr.unpack(cur_string)
+            if hdr.type not in action_object_map:
+                print "WARNING: Skipping unknown action ", hdr.type
+            else:
+                print "DEBUG: Found action of type ", hdr.type
+                self.actions.append(action_object_map[hdr.type]())
+                self.actions[count].unpack(cur_string)
+ count += 1
+ cur_string = cur_string[hdr.len:]
+ bytes_done += hdr.len
+ return cur_string
+
+ def add(self, action):
+ """
+ Add an action to an action list
+
+ @param action The action to add
+
+ @return True if successful, False if not an action object
+
+ """
+ if isinstance(action, action_class_list):
+ self.actions.append(action)
+ return True
+ return False
+
+ def __len__(self):
+ length = 0
+ for act in self.actions:
+ length += act.__len__()
+ return length
+
+ def __eq__(self, other):
+ if type(self) != type(other): return False
+ if self.actions != other.actions: return False
+ return True
+
+ def __ne__(self, other): return not self.__eq__(other)
+
+ def show(self, prefix=''):
+ print prefix + "Action List with " + str(len(self.actions)) + \
+ " actions"
+ count = 0
+ for obj in self.actions:
+ count += 1
+ print " Action " + str(count) + ": "
+ obj.show(prefix + ' ')
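+
+# Illustrative usage sketch (comments only, not executed; assumes the
+# generated action_output wrapper exposes ofp_action_output's 'port' field):
+#
+#   actions = action_list()
+#   act = action_output()
+#   act.port = 1
+#   actions.add(act)
+#   packed = actions.pack()       # wire-format string for the whole list
+#   rebuilt = action_list()
+#   rebuilt.unpack(packed)        # recreates typed action objects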
diff --git a/src/python/oftest/ofmsg/of_message.py b/src/python/oftest/ofmsg/of_message.py
new file mode 100644
index 0000000..bd6d639
--- /dev/null
+++ b/src/python/oftest/ofmsg/of_message.py
@@ -0,0 +1,127 @@
+
+from message import *
+from error import *
+from action import *
+from action_list import action_list
+from ofp import *
+
+"""
+of_message.py
+Contains wrapper functions and classes for the of_message namespace
+that are generated by hand. It includes the rest of the wrapper
+function information into the of_message namespace
+"""
+
+# These message types are subclassed
+msg_type_subclassed = [
+ OFPT_STATS_REQUEST,
+ OFPT_STATS_REPLY,
+ OFPT_ERROR
+]
+
+# Maps from sub-types to classes
+stats_reply_to_class_map = {
+ OFPST_DESC : desc_stats_reply,
+ OFPST_AGGREGATE : aggregate_stats_reply,
+ OFPST_FLOW : flow_stats_reply,
+ OFPST_TABLE : table_stats_reply,
+ OFPST_PORT : port_stats_reply,
+ OFPST_QUEUE : queue_stats_reply
+}
+
+stats_request_to_class_map = {
+ OFPST_DESC : desc_stats_request,
+ OFPST_AGGREGATE : aggregate_stats_request,
+ OFPST_FLOW : flow_stats_request,
+ OFPST_TABLE : table_stats_request,
+ OFPST_PORT : port_stats_request,
+ OFPST_QUEUE : queue_stats_request
+}
+
+error_to_class_map = {
+ OFPET_HELLO_FAILED : hello_failed_error_msg,
+ OFPET_BAD_REQUEST : bad_request_error_msg,
+ OFPET_BAD_ACTION : bad_action_error_msg,
+ OFPET_FLOW_MOD_FAILED : flow_mod_failed_error_msg,
+ OFPET_PORT_MOD_FAILED : port_mod_failed_error_msg,
+ OFPET_QUEUE_OP_FAILED : queue_op_failed_error_msg
+}
+
+# Map from header type value to the underlying message class
+msg_type_to_class_map = {
+ OFPT_HELLO : hello,
+ OFPT_ERROR : error,
+ OFPT_ECHO_REQUEST : echo_request,
+ OFPT_ECHO_REPLY : echo_reply,
+ OFPT_VENDOR : vendor,
+ OFPT_FEATURES_REQUEST : features_request,
+ OFPT_FEATURES_REPLY : features_reply,
+ OFPT_GET_CONFIG_REQUEST : get_config_request,
+ OFPT_GET_CONFIG_REPLY : get_config_reply,
+ OFPT_SET_CONFIG : set_config,
+ OFPT_PACKET_IN : packet_in,
+ OFPT_FLOW_REMOVED : flow_removed,
+ OFPT_PORT_STATUS : port_status,
+ OFPT_PACKET_OUT : packet_out,
+ OFPT_FLOW_MOD : flow_mod,
+ OFPT_PORT_MOD : port_mod,
+ OFPT_STATS_REQUEST : stats_request,
+ OFPT_STATS_REPLY : stats_reply,
+ OFPT_BARRIER_REQUEST : barrier_request,
+ OFPT_BARRIER_REPLY : barrier_reply,
+ OFPT_QUEUE_GET_CONFIG_REQUEST : queue_get_config_request,
+ OFPT_QUEUE_GET_CONFIG_REPLY : queue_get_config_reply
+}
+
+def _of_message_to_object(binary_string):
+ """
+ Map a binary string to the corresponding class.
+
+ Appropriately resolves subclasses
+ """
+ hdr = ofp_header()
+ hdr.unpack(binary_string)
+ # FIXME: Add error detection
+ if not hdr.type in msg_type_subclassed:
+ return msg_type_to_class_map[hdr.type]()
+ if hdr.type == OFPT_STATS_REQUEST:
+ st_hdr = ofp_stats_request()
+ st_hdr.unpack(binary_string)
+ return stats_request_to_class_map[st_hdr.type]()
+ elif hdr.type == OFPT_STATS_REPLY:
+ st_hdr = ofp_stats_reply()
+ st_hdr.unpack(binary_string)
+ return stats_reply_to_class_map[st_hdr.type]()
+    elif hdr.type == OFPT_ERROR:
+        err_hdr = ofp_error_msg()
+        err_hdr.unpack(binary_string)
+        return error_to_class_map[err_hdr.type]()
+ else:
+ print "ERROR parsing packet to object"
+ return None
+
+def of_message_parse(binary_string, raw=False):
+ """
+ Parse an OpenFlow packet
+
+ Parses a raw OpenFlow packet into a Python class, with class
+ members fully populated.
+
+ @param binary_string The packet (string) to be parsed
+
+ @param raw If true, interpret the packet as an L2 packet. Not
+ yet supported.
+
+ @return An object of some message class or None if fails
+
+ """
+
+ if raw:
+ print "raw packet message parsing not supported"
+ return None
+
+ obj = _of_message_to_object(binary_string)
+ if obj != None:
+ obj.unpack(binary_string)
+ return obj
+
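+# Illustrative round trip (sketch only; echo_request is one of the
+# generated wrapper classes listed in msg_type_to_class_map above):
+#
+#   msg = echo_request()
+#   wire = msg.pack()
+#   obj = of_message_parse(wire)  # returns an echo_request instance
+#   if obj is not None:
+#       obj.show()
+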
diff --git a/tools/munger/Makefile b/tools/munger/Makefile
new file mode 100644
index 0000000..5dd3f5c
--- /dev/null
+++ b/tools/munger/Makefile
@@ -0,0 +1,79 @@
+#
+# Simple make file to generate OpenFlow python files
+#
+# Fixme: Would like pylibopenflow to be runnable from outside its own
+# directory; currently we have to cd into it and refer back with relative paths
+
+TOP_DIR = ../..
+TOOLS_DIR = ..
+DOC_DIR = ${TOP_DIR}/doc
+
+PYLIBOF_DIR = ${TOOLS_DIR}/pylibopenflow
+
+TARGET_DIR = ${TOP_DIR}/src/python/oftest/ofmsg
+
+# Relative to pyopenflow-pythonize exec location
+OF_HEADER = include/openflow.h
+
+# Relative to here
+ABS_OF_HEADER = ${PYLIBOF_DIR}/${OF_HEADER}
+
+PYTHONIZE = bin/pyopenflow-pythonize.py
+OFP_GEN_CMD = (cd ${PYLIBOF_DIR} && ${PYTHONIZE} -i ${OF_HEADER} \
+ ${TARGET_DIR}/ofp.py)
+
+# Dependencies for ofp.py
+OFP_DEP = ${ABS_OF_HEADER} $(wildcard ${PYLIBOF_DIR}/pylib/*.py)
+OFP_DEP += $(wildcard ${PYLIBOF_DIR}/pylib/of/*.py)
+
+# FIXME: There are three types of .py files:
+# ofp.py from pylibopenflow output
+# %.py generated from %_gen.py
+# of_message.py and action_list.py -- hand built, already in src dir
+
+GEN_FILES := $(addprefix ${TARGET_DIR}/,ofp.py message.py error.py action.py)
+OTHER_FILES := $(addprefix ${TARGET_DIR}/,action_list.py of_message.py)
+LINT_SOURCE := ${GEN_FILES} ${OTHER_FILES}
+LINT_FILES := $(subst .py,.log,${LINT_SOURCE})
+LINT_FILES := $(subst ${TARGET_DIR}/,lint/,${LINT_FILES})
+
+all: ${GEN_FILES}
+ @echo "Generated files"
+
+${TARGET_DIR}/ofp.py: ${OFP_DEP}
+ ${OFP_GEN_CMD}
+
+# General rule like src/message.py comes from scripts/message_gen.py
+${TARGET_DIR}/%.py: scripts/%_gen.py ${TARGET_DIR}/ofp.py
+ python $< > $@
+
+lint/%.log: ${TARGET_DIR}/%.py
+ (cd ${TARGET_DIR} && pylint -e $(notdir $<)) > $@
+
+lint: ${LINT_FILES}
+
+# For now, only local source documentation is generated
+doc: ${GEN_FILES} ${OTHER_FILES} ${DOC_DIR}/Doxyfile
+ (cd ${DOC_DIR} && doxygen)
+
+clean:
+ rm -rf ${GEN_FILES} ${LINT_FILES} ${DOC_DIR}/html/*
+
+help:
+ @echo
+ @echo Makefile for oftest source munger
+ @echo Default builds python files and installs in ${TARGET_DIR}
+ @echo make local: Generate files and put in src/
+ @echo
+ @echo Debug info:
+ @echo
+ @echo Files generated GEN_FILES: ${GEN_FILES}
+ @echo
+ @echo Dependencies for ofp.py OFP_DEP: ${OFP_DEP}
+ @echo
+ @echo Already created files OTHER_FILES: ${OTHER_FILES}
+ @echo
+ @echo LINT_FILES: ${LINT_FILES}
+
+
+.PHONY: all local install help doc lint clean
diff --git a/tools/munger/scripts/action_gen.py b/tools/munger/scripts/action_gen.py
new file mode 100644
index 0000000..22c7b4b
--- /dev/null
+++ b/tools/munger/scripts/action_gen.py
@@ -0,0 +1,97 @@
+#!/usr/bin/python
+#
+# This python script generates action subclasses
+#
+
+import re
+
+print """
+# Python OpenFlow action wrapper classes
+
+from ofp import *
+
+# This will never happen; done to avoid lint warning
+if __name__ == '__main__':
+ def of_message_parse(msg): return None
+
+"""
+
+################################################################
+#
+# Action subclasses
+#
+################################################################
+
+action_structs = [
+ 'output',
+ 'vlan_vid',
+ 'vlan_pcp',
+ 'dl_addr',
+ 'nw_addr',
+ 'tp_port',
+ 'nw_tos',
+ 'vendor_header']
+
+action_types = [
+ 'output',
+ 'set_vlan_vid',
+ 'set_vlan_pcp',
+ 'strip_vlan',
+ 'set_dl_src',
+ 'set_dl_dst',
+ 'set_nw_src',
+ 'set_nw_dst',
+ 'set_nw_tos',
+ 'set_tp_src',
+ 'set_tp_dst',
+ 'enqueue',
+ 'vendor'
+]
+action_types.sort()
+
+action_class_map = {
+ 'output' : 'ofp_action_output',
+ 'set_vlan_vid' : 'ofp_action_vlan_vid',
+ 'set_vlan_pcp' : 'ofp_action_vlan_pcp',
+ 'strip_vlan' : 'ofp_action_header',
+ 'set_dl_src' : 'ofp_action_dl_addr',
+ 'set_dl_dst' : 'ofp_action_dl_addr',
+ 'set_nw_src' : 'ofp_action_nw_addr',
+ 'set_nw_dst' : 'ofp_action_nw_addr',
+ 'set_nw_tos' : 'ofp_action_nw_tos',
+ 'set_tp_src' : 'ofp_action_tp_port',
+ 'set_tp_dst' : 'ofp_action_tp_port',
+ 'enqueue' : 'ofp_action_enqueue',
+ 'vendor' : 'ofp_action_vendor_header'
+}
+
+template = """
+class action_--TYPE--(--PARENT_TYPE--):
+ \"""
+ Wrapper class for --TYPE-- action object
+ \"""
+ def __init__(self):
+ --PARENT_TYPE--.__init__(self)
+ self.type = --ACTION_NAME--
+ self.len = self.__len__()
+ def show(self, prefix=''):
+ print prefix + "action_--TYPE--"
+ --PARENT_TYPE--.show(self, prefix)
+"""
+
+if __name__ == '__main__':
+ for (t, parent) in action_class_map.items():
+ action_name = "OFPAT_" + t.upper()
+ to_print = re.sub('--TYPE--', t, template)
+ to_print = re.sub('--PARENT_TYPE--', parent, to_print)
+ to_print = re.sub('--ACTION_NAME--', action_name, to_print)
+ print to_print
+
+ # Generate a list of action classes
+ print "action_class_list = ("
+ prev = None
+ for (t, parent) in action_class_map.items():
+ if prev:
+ print " action_" + prev + ","
+ prev = t
+ print " action_" + prev + ")"
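+
+# For reference, the template above expands for the 'output' type to
+# roughly the following generated class (docstring omitted):
+#
+#   class action_output(ofp_action_output):
+#       def __init__(self):
+#           ofp_action_output.__init__(self)
+#           self.type = OFPAT_OUTPUT
+#           self.len = self.__len__()
+#       def show(self, prefix=''):
+#           print prefix + "action_output"
+#           ofp_action_output.show(self, prefix)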
diff --git a/tools/munger/scripts/error_gen.py b/tools/munger/scripts/error_gen.py
new file mode 100644
index 0000000..7dff087
--- /dev/null
+++ b/tools/munger/scripts/error_gen.py
@@ -0,0 +1,75 @@
+#!/usr/bin/python
+#
+# This python script generates error subclasses
+#
+
+import re
+
+print """
+# Python OpenFlow error wrapper classes
+
+from ofp import *
+
+# This will never happen; done to avoid lint warning
+if __name__ == '__main__':
+ def of_message_parse(msg): return None
+
+"""
+
+################################################################
+#
+# Error message subclasses
+#
+################################################################
+
+# Template for error subclasses
+
+template = """
+class --TYPE--_error_msg(ofp_error_msg):
+ \"""
+ Wrapper class for --TYPE-- error message class
+ \"""
+ def __init__(self):
+ ofp_error_msg.__init__(self)
+ self.header = ofp_header()
+ self.header.type = OFPT_ERROR
+ self.type = --ERROR_NAME--
+ self.data = ""
+
+    def pack(self, assertstruct=True):
+        self.header.length = self.__len__()
+        packed = ofp_error_msg.pack(self)
+        packed += self.data
+        return packed
+
+ def unpack(self, binary_string):
+ binary_string = ofp_error_msg.unpack(self, binary_string)
+ self.data = binary_string
+        return ""
+
+ def __len__(self):
+ return OFP_HEADER_BYTES + OFP_ERROR_MSG_BYTES + len(self.data)
+
+ def show(self, prefix=''):
+ print prefix + "--TYPE--_error_msg"
+ ofp_error_msg.show(self)
+        print prefix + "data is of length " + str(len(self.data))
+ obj = of_message_parse(self.data)
+ if obj != None:
+ obj.show()
+ else:
+ print prefix + "Unable to parse data"
+"""
+
+error_types = [
+ 'hello_failed',
+ 'bad_request',
+ 'bad_action',
+ 'flow_mod_failed',
+ 'port_mod_failed',
+ 'queue_op_failed']
+
+for t in error_types:
+ error_name = "OFPET_" + t.upper()
+ to_print = re.sub('--TYPE--', t, template)
+ to_print = re.sub('--ERROR_NAME--', error_name, to_print)
+ print to_print
diff --git a/tools/munger/scripts/message_gen.py b/tools/munger/scripts/message_gen.py
new file mode 100644
index 0000000..f065874
--- /dev/null
+++ b/tools/munger/scripts/message_gen.py
@@ -0,0 +1,650 @@
+#!/usr/bin/python
+#
+# This python script generates wrapper functions for OpenFlow messages
+#
+# See the doc string below for more info
+#
+
+# To do:
+# Default type values for messages
+# Generate all message objects
+# Action list objects?
+# Autogen lengths when possible
+# Dictionaries for enum strings
+# Resolve sub struct initializers (see ofp_flow_mod)
+
+
+"""
+Generate wrapper classes for OpenFlow messages
+
+(C) Copyright Stanford University
+Date February 2010
+Created by dtalayco
+
+Attempting to follow http://www.python.org/dev/peps/pep-0008/
+The main exception is that our class names do not use CamelCase
+so as to more closely match the original C code names.
+
+This file is meant to generate a file of_wrapper.py which imports
+the base classes generated from automatic processing of openflow.h
+and produces wrapper classes for each OpenFlow message type.
+
+This file will normally be included in of_message.py which provides
+additional hand-generated work.
+
+There are two types of structures/classes here: base components and
+message classes.
+
+Base components are the base data classes which are fixed
+length structures including:
+ ofp_header
+ Each ofp_action structure
+ ofp_phy_port
+ The array elements of all the stats reply messages
+The base components are to be imported from a file of_header.py.
+
+Message classes define a complete message on the wire. These are
+comprised of possibly variable length lists of possibly variably
+typed objects from the base component list above.
+
+Each OpenFlow message has a header and zero or more fixed length
+members (the "core members" of the class) followed by zero or more
+variable length lists.
+
+The wrapper classes should live in their own name space, probably
+of_message. Automatically generated base component and skeletons for
+the message classes are assumed generated and the wrapper classes
+will inherit from those.
+
+Every message class must implement pack and unpack functions to
+convert between the class and a string representing what goes on the
+wire.
+
+For unpacking, the low level (base-component) classes must implement
+their own unpack functions. A single top level unpack function
+will do the parsing and call the lower layer unpack functions as
+appropriate.
+
+Every base and message class should implement a show function to
+(recursively) display the contents of the object.
+
+Certain OpenFlow message types are further subclassed. These include
+stats_request, stats_reply and error.
+
+"""
+
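+# The wrapper classes emitted into message.py are all intended to follow
+# the same round-trip convention; a minimal illustrative sketch (not part
+# of the generator's output):
+#
+#   msg = features_request()      # any generated wrapper class
+#   wire = msg.pack()             # string ready for the wire
+#   rest = msg.unpack(wire)       # rest is normally the empty string
+#   msg.show('  ')                # recursive, human readable dump
+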
+# Don't generate header object in messages
+# Map each message to a body that doesn't include the header
+# The body does not include variable length info at the end
+
+import re
+import string
+import sys
+
+message_top_matter = """
+# Python OpenFlow message wrapper classes
+
+from ofp import *
+from action_list import action_list
+
+# This will never happen; done to avoid lint warning
+if __name__ == '__main__':
+ def of_message_parse(msg): return None
+
+# Define templates for documentation
+class ofp_template_msg:
+ \"""
+ Sample base class for template_msg; normally auto generated
+ This class should live in the of_header name space and provides the
+ base class for this type of message. It will be wrapped for the
+ high level API.
+
+ \"""
+ def __init__(self):
+ \"""
+ Constructor for base class
+
+ \"""
+ self.header = ofp_header()
+ # Additional base data members declared here
+ # Normally will define pack, unpack, __len__ functions
+
+class template_msg(ofp_template_msg):
+ \"""
+ Sample class wrapper for template_msg
+ This class should live in the of_message name space and provides the
+ high level API for an OpenFlow message object. These objects must
+ implement the functions indicated in this template.
+
+ \"""
+ def __init__(self):
+ \"""
+ Constructor
+ Must set the header type value appropriately for the message
+
+ \"""
+ ofp_template_msg.__init__(self)
+ # For a real message, will be set to an integer
+ self.header.type = "TEMPLATE_MSG_VALUE"
+ def pack(self):
+ \"""
+ Pack object into string
+
+ @return The packed string which can go on the wire
+
+ \"""
+ pass
+ def unpack(self, binary_string):
+ \"""
+ Unpack object from a binary string
+
+ @param binary_string The wire protocol byte string holding the object
+ represented as an array of bytes.
+
+ @return Typically returns the remainder of binary_string that
+ was not parsed. May give a warning if that string is non-empty
+
+ \"""
+ pass
+ def __len__(self):
+ \"""
+ Return the length of this object once packed into a string
+
+ @return An integer representing the number bytes in the packed
+ string.
+
+ \"""
+ pass
+ def show(self, prefix=''):
+ \"""
+ Display the contents of the object in a readable manner
+
+ @param prefix Printed at the beginning of each line.
+
+ \"""
+ pass
+ def __eq__(self, other):
+ \"""
+ Return True if self and other hold the same data
+
+ @param other Other object in comparison
+
+ \"""
+ pass
+ def __ne__(self, other):
+ \"""
+ Return True if self and other do not hold the same data
+
+ @param other Other object in comparison
+
+ \"""
+ pass
+"""
+
+# Dictionary mapping wrapped classes to the auto-generated structure
+# underlying the class (body only, not header or var-length data)
+message_class_map = {
+ "hello" : "ofp_header",
+ "error" : "ofp_error_msg",
+ "echo_request" : "ofp_header",
+ "echo_reply" : "ofp_header",
+ "vendor" : "ofp_vendor_header",
+ "features_request" : "ofp_header",
+ "features_reply" : "ofp_switch_features",
+ "get_config_request" : "ofp_header",
+ "get_config_reply" : "ofp_switch_config",
+ "set_config" : "ofp_switch_config",
+ "packet_in" : "ofp_packet_in",
+ "flow_removed" : "ofp_flow_removed",
+ "port_status" : "ofp_port_status",
+ "packet_out" : "ofp_packet_out",
+ "flow_mod" : "ofp_flow_mod",
+ "port_mod" : "ofp_port_mod",
+ "stats_request" : "ofp_stats_request",
+ "stats_reply" : "ofp_stats_reply",
+ "barrier_request" : "ofp_header",
+ "barrier_reply" : "ofp_header",
+ "queue_get_config_request" : "ofp_queue_get_config_request",
+ "queue_get_config_reply" : "ofp_queue_get_config_reply"
+}
+
+# These messages have a string member at the end of the data
+string_members = [
+ "hello",
+ "error",
+ "echo_request",
+ "echo_reply",
+ "vendor",
+ "packet_in",
+ "packet_out"
+]
+
+# These messages have a list (with the given name) in the data,
+# after the core members; the type is given for validation
+list_members = {
+ "features_reply" : ('ports', None),
+ "packet_out" : ('actions', 'action_list'),
+ "flow_mod" : ('actions', 'action_list'),
+ "queue_get_config_reply" : ('queues', None)
+}
+
+_ind = " "
+
+def _p1(s): print _ind + s
+def _p2(s): print _ind * 2 + s
+def _p3(s): print _ind * 3 + s
+def _p4(s): print _ind * 4 + s
+
+# Okay, this gets kind of ugly:
+# There are three variables:
+# has_core_members: If parent class is not ofp_header, has inheritance
+# has_list: Whether class has trailing array or class
+# has_string: Whether class has trailing string
+
+def gen_message_wrapper(msg):
+ """
+ Generate a wrapper for the given message based on above info
+ @param msg String identifying the message name for the class
+ """
+
+ msg_name = "OFPT_" + msg.upper()
+ parent = message_class_map[msg]
+
+ has_list = False # Has trailing list
+ has_core_members = False
+ has_string = False # Has trailing string
+ if parent != 'ofp_header':
+ has_core_members = True
+ if msg in list_members.keys():
+ (list_var, list_type) = list_members[msg]
+ has_list = True
+ if msg in string_members:
+ has_string = True
+
+ if has_core_members:
+ print "class " + msg + "(" + parent + "):"
+ else:
+ print "class " + msg + ":"
+ _p1('"""')
+ _p1("Wrapper class for " + msg)
+ print
+ if has_list:
+ if list_type == None:
+ _p1("Has trailing variable array " + list_var);
+ else:
+ _p1("Has trailing object " + list_var + " of type " + list_type);
+ print
+ if has_string:
+ _p1("Has trailing string data")
+ print
+ _p1('"""')
+
+ print
+ _p1("def __init__(self):")
+ if has_core_members:
+ _p2(parent + ".__init__(self)")
+ _p2("self.header = ofp_header()")
+ _p2("self.header.type = " + msg_name)
+ if has_list:
+ if list_type == None:
+ _p2('self.' + list_var + ' = []')
+ else:
+ _p2('self.' + list_var + ' = ' + list_type + '()')
+ if has_string:
+ _p2('self.data = ""')
+
+ print
+    _p1("def pack(self):")
+    _p2("# Fixme: Calculate length for header, etc, once __len__ fixed")
+    _p2("packed = self.header.pack()")
+    if has_core_members:
+        _p2("packed += " + parent + ".pack(self)")
+    if has_list:
+        if list_type == None:
+            _p2('for obj in self.' + list_var + ':')
+            _p3('packed += obj.pack()')
+        else:
+            _p2('packed += self.' + list_var + '.pack()')
+    if has_string:
+        _p2('packed += self.data')
+    _p2("return packed")
+
+ print
+ _p1("def unpack(self, binary_string):")
+ _p2("binary_string = self.header.unpack(binary_string)")
+ if has_core_members:
+ _p2("binary_string = " + parent + ".unpack(self, binary_string)")
+ if has_list:
+ if list_type == None:
+ _p2("for obj in self." + list_var + ":")
+ _p3("binary_string = obj.unpack(binary_string)")
+        elif msg == "packet_out": # Special case this
+            _p2('binary_string = self.actions.unpack(binary_string, bytes=self.actions_len)')
+        elif msg == "flow_mod": # Special case this
+            _p2("ai_len = self.header.length - OFP_FLOW_MOD_BYTES")
+            _p2("binary_string = self.actions.unpack(binary_string, bytes=ai_len)")
+ else:
+ _p2("binary_string = self." + list_var + ".unpack(binary_string)")
+ if has_string:
+ _p2("self.data = binary_string")
+ _p2("binary_string = ''")
+ else:
+ _p2("# Fixme: If no self.data, add check for data remaining")
+ _p2("return binary_string")
+
+ print
+ _p1("def __len__(self):")
+ _p2("# Fixme: Do the right thing")
+ _p2("return len(self.pack())")
+
+ print
+ _p1("def show(self, prefix=''):")
+ _p2("print prefix + '" + msg + " (" + msg_name + ")'")
+ _p2("prefix += ' '")
+ _p2("self.header.show(prefix)")
+ if has_core_members:
+ _p2(parent + ".show(self, prefix)")
+ if has_list:
+ if list_type == None:
+ _p2('print prefix + "Array ' + list_var + '"')
+ _p2('for obj in self.' + list_var +':')
+ _p3("obj.show(prefix + ' ')")
+ else:
+ _p2('print prefix + "List ' + list_var + '"')
+ _p2('self.' + list_var + ".show(prefix + ' ')")
+ if has_string:
+ _p2("print prefix + 'data is of length ' + str(len(self.data))")
+ _p2("if len(self.data) > 0:")
+ _p3("obj = of_message_parse(self.data)")
+ _p3("if obj != None:")
+ _p4("obj.show(prefix)")
+ _p3("else:")
+ _p4('print prefix + "Unable to parse data"')
+
+ print
+    _p1("def __eq__(self, other):")
+    _p2("if type(self) != type(other): return False")
+    _p2("if self.header.__ne__(other.header): return False")
+    if has_core_members:
+        _p2("if " + parent + ".__ne__(self, other): return False")
+ if has_string:
+ _p2("if self.data != other.data: return False")
+ if has_list:
+ _p2("if self." + list_var + " != other." + list_var + ": return False")
+ _p2("return True")
+
+ print
+ _p1("def __ne__(self, other): return not self.__eq__(other)")
+
+
+
+################################################################
+#
+# Stats request subclasses
+# description_request, flow, aggregate, table, port, vendor
+#
+################################################################
+
+# table and desc stats requests are special with empty body
+extra_ofp_stats_req_defs = """
+# Stats request bodies for desc and table stats are not defined in the
+# OpenFlow header; We define them here. They are empty classes, really
+
+class ofp_desc_stats_request:
+ \"""
+ Forced definition of ofp_desc_stats_request (empty class)
+ \"""
+ def __init__(self):
+ pass
+ def pack(self, assertstruct=True):
+ return ""
+ def unpack(self, binary_string):
+ return binary_string
+ def __len__(self):
+ return 0
+ def show(self, prefix=''):
+ pass
+ def __eq__(self, other):
+ return type(self) == type(other)
+ def __ne__(self, other):
+ return type(self) != type(other)
+
+OFP_DESC_STATS_REQUEST_BYTES = 0
+
+class ofp_table_stats_request:
+ \"""
+ Forced definition of ofp_table_stats_request (empty class)
+ \"""
+ def __init__(self):
+ pass
+ def pack(self, assertstruct=True):
+ return ""
+ def unpack(self, binary_string):
+ return binary_string
+ def __len__(self):
+ return 0
+ def show(self, prefix=''):
+ pass
+ def __eq__(self, other):
+ return type(self) == type(other)
+ def __ne__(self, other):
+ return type(self) != type(other)
+
+OFP_TABLE_STATS_REQUEST_BYTES = 0
+
+"""
+
+stats_request_template = """
+class --TYPE--_stats_request(ofp_stats_request, ofp_--TYPE--_stats_request):
+ \"""
+ Wrapper class for --TYPE-- stats request message
+ \"""
+ def __init__(self):
+ self.header = ofp_header()
+ ofp_stats_request.__init__(self)
+ ofp_--TYPE--_stats_request.__init__(self)
+ self.header.type = OFPT_STATS_REQUEST
+ self.type = --STATS_NAME--
+
+    def pack(self, assertstruct=True):
+        packed = ofp_stats_request.pack(self)
+        packed += ofp_--TYPE--_stats_request.pack(self)
+        return packed
+
+ def unpack(self, binary_string):
+ binary_string = ofp_stats_request.unpack(self, binary_string)
+ binary_string = ofp_--TYPE--_stats_request.unpack(self, binary_string)
+ if len(binary_string) != 0:
+ print "Error unpacking --TYPE--: extra data"
+ return binary_string
+
+ def __len__(self):
+ return len(self.header) + OFP_STATS_REQUEST_BYTES + \\
+ OFP_--TYPE_UPPER--_STATS_REQUEST_BYTES
+
+ def show(self, prefix=''):
+ print prefix + "--TYPE--_stats_request"
+ ofp_stats_request.show(self)
+ ofp_--TYPE--_stats_request.show(self)
+
+ def __eq__(self, other):
+ return (ofp_stats_request.__eq__(self, other) and
+ ofp_--TYPE--_stats_request.__eq__(self, other))
+
+ def __ne__(self, other): return not self.__eq__(other)
+"""
+
+################################################################
+#
+# Stats replies always have an array at the end.
+# For aggregate and desc, these arrays are always of length 1
+# This array is always called stats
+#
+################################################################
+
+
+# Template for objects stats reply messages
+stats_reply_template = """
+class --TYPE--_stats_reply(ofp_stats_reply):
+ \"""
+ Wrapper class for --TYPE-- stats reply
+ \"""
+ def __init__(self):
+ self.header = ofp_header()
+ ofp_stats_reply.__init__(self)
+ self.header.type = OFPT_STATS_REPLY
+ self.type = --STATS_NAME--
+ # stats: Array of type --TYPE--_stats_entry
+ self.stats = []
+
+    def pack(self, assertstruct=True):
+        packed = ofp_stats_reply.pack(self)
+        for obj in self.stats:
+            packed += obj.pack()
+        return packed
+
+ def unpack(self, binary_string):
+ binary_string = ofp_stats_reply.unpack(self, binary_string)
+ dummy = --TYPE--_stats_entry()
+ while len(binary_string) >= len(dummy):
+ obj = --TYPE--_stats_entry()
+ binary_string = obj.unpack(binary_string)
+ self.stats.append(obj)
+ if len(binary_string) != 0:
+ print "ERROR unpacking --TYPE-- stats string: extra bytes"
+ return binary_string
+
+ def __len__(self):
+ length = len(self.header) + OFP_STATS_REPLY_BYTES
+ for obj in self.stats:
+ length += len(obj)
+ return length
+
+ def show(self, prefix=''):
+ print prefix + "--TYPE--_stats_reply"
+ ofp_stats_reply.show(self)
+ for obj in self.stats:
+ obj.show()
+
+ def __eq__(self, other):
+ if ofp_stats_reply.__ne__(self, other): return False
+ return self.stats == other.stats
+
+ def __ne__(self, other): return not self.__eq__(other)
+"""
+
+#
+# To address variations in stats reply bodies, the following
+# "_entry" classes are defined for each element in the reply
+#
+
+extra_stats_entry_defs = """
+# Stats entries define the content of one element in a stats
+# reply for the indicated type; define _entry for consistency
+
+aggregate_stats_entry = ofp_aggregate_stats_reply
+desc_stats_entry = ofp_desc_stats
+port_stats_entry = ofp_port_stats
+queue_stats_entry = ofp_queue_stats
+table_stats_entry = ofp_table_stats
+"""
+
+# Special case flow_stats to handle actions_list
+
+flow_stats_entry_def = """
+#
+# Flow stats entry contains an action list of variable length, so
+# it is done by hand
+#
+
+class flow_stats_entry(ofp_flow_stats):
+ \"""
+ Special case flow stats entry to handle action list object
+ \"""
+ def __init__(self):
+ ofp_flow_stats.__init__(self)
+ self.actions = action_list()
+
+ def pack(self, assertstruct=True):
+ if self.length < OFP_FLOW_STATS_BYTES:
+ print "ERROR in flow_stats_entry pack: length member is too small"
+ return None
+ packed = ofp_flow_stats.pack(self, assertstruct)
+ packed += self.actions.pack()
+ return packed
+
+ def unpack(self, binary_string):
+ binary_string = ofp_flow_stats.unpack(self, binary_string)
+ ai_len = self.length - OFP_FLOW_STATS_BYTES
+ binary_string = self.actions.unpack(binary_string, bytes=ai_len)
+ return binary_string
+
+ def __len__(self):
+ return OFP_FLOW_STATS_BYTES + len(self.actions)
+
+ def show(self, prefix=''):
+ print prefix + "flow_stats_entry"
+ ofp_flow_stats.show(self, prefix + ' ')
+ self.actions.show(prefix + ' ')
+
+ def __eq__(self, other):
+ return (ofp_flow_stats.__eq__(self, other) and
+ self.actions == other.actions)
+
+ def __ne__(self, other): return not self.__eq__(other)
+"""
+
+stats_types = [
+ 'aggregate',
+ 'desc',
+ 'flow',
+ 'port',
+ 'queue',
+ 'table']
+
+if __name__ == '__main__':
+
+ print message_top_matter
+
+ print """
+################################################################
+#
+# OpenFlow Message Definitions
+#
+################################################################
+"""
+
+ msg_types = message_class_map.keys()
+ msg_types.sort()
+
+ for t in msg_types:
+ gen_message_wrapper(t)
+ print
+
+ print """
+################################################################
+#
+# Stats request and reply subclass definitions
+#
+################################################################
+"""
+
+ print extra_ofp_stats_req_defs
+ print extra_stats_entry_defs
+ print flow_stats_entry_def
+
+ # Generate stats request and reply subclasses
+ for t in stats_types:
+ stats_name = "OFPST_" + t.upper()
+ to_print = re.sub('--TYPE--', t, stats_request_template)
+ to_print = re.sub('--TYPE_UPPER--', t.upper(), to_print)
+ to_print = re.sub('--STATS_NAME--', stats_name, to_print)
+ print to_print
+ to_print = re.sub('--TYPE--', t, stats_reply_template)
+ to_print = re.sub('--STATS_NAME--', stats_name, to_print)
+ print to_print
+
+
+#
+# OFP match variants
+# ICMP 0x801 (?) ==> icmp_type/code replace tp_src/dst
+#
+
+
diff --git a/tools/pylibopenflow/.gitignore b/tools/pylibopenflow/.gitignore
new file mode 100644
index 0000000..2f836aa
--- /dev/null
+++ b/tools/pylibopenflow/.gitignore
@@ -0,0 +1,2 @@
+*~
+*.pyc
diff --git a/tools/pylibopenflow/bin/cstruct2py-get-struct.py b/tools/pylibopenflow/bin/cstruct2py-get-struct.py
new file mode 100755
index 0000000..4b8a350
--- /dev/null
+++ b/tools/pylibopenflow/bin/cstruct2py-get-struct.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python2.5
+"""This script reads struct from C/C++ header file and output query
+
+Author ykk
+Date June 2009
+"""
+import sys
+import getopt
+import cheader
+import c2py
+
+
+def usage():
+ """Display usage
+ """
+ print "Usage "+sys.argv[0]+" <options> header_files... struct_name\n"+\
+ "Options:\n"+\
+ "-h/--help\n\tPrint this usage guide\n"+\
+ "-c/--cstruct\n\tPrint C struct\n"+\
+          "-n/--names\n\tPrint names of struct\n"+\
+ "-s/--size\n\tPrint size of struct\n"+\
+ ""
+
+#Parse options and arguments
+try:
+ opts, args = getopt.getopt(sys.argv[1:], "hcsn",
+ ["help","cstruct","size","names"])
+except getopt.GetoptError:
+ usage()
+ sys.exit(2)
+
+#Check there is at least 1 input file and struct name
+if (len(args) < 2):
+ usage()
+ sys.exit(2)
+
+#Parse options
+##Print C struct
+printc = False
+##Print names
+printname = False
+##Print size
+printsize = False
+for opt,arg in opts:
+ if (opt in ("-h","--help")):
+ usage()
+ sys.exit(0)
+ elif (opt in ("-s","--size")):
+ printsize = True
+ elif (opt in ("-c","--cstruct")):
+ printc = True
+ elif (opt in ("-n","--names")):
+ printname = True
+ else:
+ print "Unhandled option :"+opt
+ sys.exit(1)
+
+headerfile = cheader.cheaderfile(args[:-1])
+cstruct = headerfile.structs[args[-1].strip()]
+cs2p = c2py.cstruct2py()
+pattern = cs2p.get_pattern(cstruct)
+
+#Print C struct
+if (printc):
+ print cstruct
+
+#Print pattern
+print "Python pattern = "+pattern
+
+#Print name
+if (printname):
+ print cstruct.get_names()
+
+#Print size
+if (printsize):
+ print "Size = "+str(cs2p.get_size(pattern))
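+
+#Example invocation (illustrative; may require PYTHONPATH to include pylib):
+#  ./bin/cstruct2py-get-struct.py -c -s include/openflow.h ofp_header
+#prints the C struct, its Python struct pattern and the packed size.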
diff --git a/tools/pylibopenflow/bin/cstruct2py-pythonize.py b/tools/pylibopenflow/bin/cstruct2py-pythonize.py
new file mode 100755
index 0000000..47cbffb
--- /dev/null
+++ b/tools/pylibopenflow/bin/cstruct2py-pythonize.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python2.5
+"""This script reads struct
+
+Author ykk
+Date Jan 2010
+"""
+import sys
+import getopt
+import cpythonize
+import cheader
+
+def usage():
+ """Display usage
+ """
+ print "Usage "+sys.argv[0]+" <options> header_files... output_file\n"+\
+ "Options:\n"+\
+ "-h/--help\n\tPrint this usage guide\n"+\
+ ""
+
+#Parse options and arguments
+try:
+ opts, args = getopt.getopt(sys.argv[1:], "h",
+ ["help"])
+except getopt.GetoptError:
+ usage()
+ sys.exit(2)
+
+#Parse options
+for opt,arg in opts:
+ if (opt in ("-h","--help")):
+ usage()
+ sys.exit(0)
+ else:
+ print "Unhandled option :"+opt
+ sys.exit(2)
+
+#Check there is at least 1 input file with 1 output file
+if (len(args) < 2):
+ usage()
+ sys.exit(2)
+
+ch = cheader.cheaderfile(args[:-1])
+py = cpythonize.pythonizer(ch)
+fileRef = open(args[len(args)-1], "w")
+for l in py.pycode():
+ fileRef.write(l+"\n")
+fileRef.close()
+
diff --git a/tools/pylibopenflow/bin/cstruct2py-query-cheader.py b/tools/pylibopenflow/bin/cstruct2py-query-cheader.py
new file mode 100755
index 0000000..3d23ef0
--- /dev/null
+++ b/tools/pylibopenflow/bin/cstruct2py-query-cheader.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python2.5
+"""This script reads C/C++ header file and output query
+
+Author ykk
+Date June 2009
+"""
+import sys
+import getopt
+import cheader
+
+def usage():
+ """Display usage
+ """
+ print "Usage "+sys.argv[0]+" <options> header_file_1 header_file_2 ...\n"+\
+ "Options:\n"+\
+ "-h/--help\n\tPrint this usage guide\n"+\
+ "-E/--enums\n\tPrint all enumerations\n"+\
+ "-e/--enum\n\tPrint specified enumeration\n"+\
+ "-M/--macros\n\tPrint all macros\n"+\
+ "-m/--macro\n\tPrint value of macro\n"+\
+ "-S/--structs\n\tPrint all structs\n"+\
+ "-s/--struct\n\tPrint struct\n"+\
+ "-n/--name-only\n\tPrint names only\n"+\
+ "-P/--print-no-comment\n\tPrint with comment removed only\n"+\
+ ""
+
+#Parse options and arguments
+try:
+ opts, args = getopt.getopt(sys.argv[1:], "hMm:Ee:Ss:nP",
+ ["help","macros","macro=","enums","enum=",
+                                "structs","struct=",
+ "name-only","print-no-comment"])
+except getopt.GetoptError:
+ usage()
+ sys.exit(2)
+
+#Check there is at least input file
+if (len(args) < 1):
+ usage()
+ sys.exit(2)
+
+#Parse options
+##Print names only
+nameOnly = False
+##Print all structs?
+allStructs = False
+##Query specific struct
+struct=""
+##Print all enums?
+allEnums = False
+##Query specific enum
+enum=""
+##Print all macros?
+allMacros = False
+##Query specific macro
+macro=""
+##Print without comment
+printNoComment=False
+for opt,arg in opts:
+ if (opt in ("-h","--help")):
+ usage()
+ sys.exit(0)
+ elif (opt in ("-S","--structs")):
+ allStructs = True
+ elif (opt in ("-s","--struct")):
+ struct = arg
+ elif (opt in ("-M","--macros")):
+ allMacros = True
+ elif (opt in ("-m","--macro")):
+ macro=arg
+ elif (opt in ("-E","--enums")):
+ allEnums = True
+ elif (opt in ("-e","--enum")):
+ enum = arg
+ elif (opt in ("-n","--name-only")):
+ nameOnly = True
+ elif (opt in ("-P","--print-no-comment")):
+ printNoComment = True
+ else:
+        assert False, "Unhandled option: "+opt
+
+headerfile = cheader.cheaderfile(args)
+if (printNoComment):
+ for line in headerfile.content:
+ print line
+ sys.exit(0)
+
+#Print all macros
+if (allMacros):
+ for (macroname, value) in headerfile.macros.items():
+ if (nameOnly):
+ print macroname
+ else:
+ print macroname+"\t=\t"+str(value)
+#Print specified macro
+if (macro != ""):
+ try:
+ print macro+"="+headerfile.macros[macro]
+ except KeyError:
+ print "Macro "+macro+" not found!"
+
+#Print all structs
+if (allStructs):
+ for (structname, value) in headerfile.structs.items():
+ if (nameOnly):
+ print structname
+ else:
+ print str(value)+"\n"
+
+#Print specified struct
+if (struct != ""):
+ try:
+ print str(headerfile.structs[struct])
+ except KeyError:
+ print "Struct "+struct+" not found!"
+
+#Print all enumerations
+if (allEnums):
+ for (enumname, values) in headerfile.enums.items():
+ print enumname
+ if (not nameOnly):
+ for enumval in values:
+ try:
+ print "\t"+enumval+"="+\
+ str(headerfile.enum_values[enumval])
+ except KeyError:
+ print enumval+" not found in enum!";
+
+#Print specifed enum
+if (enum != ""):
+ try:
+ for enumval in headerfile.enums[enum]:
+ try:
+ print enumval+"="+str(headerfile.enum_values[enumval])
+ except KeyError:
+ print enumval+" not found in enum!";
+ except KeyError:
+ print "Enumeration "+enum+" not found!"
diff --git a/tools/pylibopenflow/bin/pyopenflow-get-struct.py b/tools/pylibopenflow/bin/pyopenflow-get-struct.py
new file mode 100755
index 0000000..78297c5
--- /dev/null
+++ b/tools/pylibopenflow/bin/pyopenflow-get-struct.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python2.5
+"""This script reads struct from OpenFlow header file and output query
+
+(C) Copyright Stanford University
+Author ykk
+Date October 2009
+"""
+import sys
+import getopt
+import openflow
+
+def usage():
+ """Display usage
+ """
+ print "Usage "+sys.argv[0]+" <options> struct_name\n"+\
+ "Options:\n"+\
+ "-h/--help\n\tPrint this usage guide\n"+\
+ "-c/--cstruct\n\tPrint C struct\n"+\
+          "-n/--names\n\tPrint names of struct\n"+\
+ "-s/--size\n\tPrint size of struct\n"+\
+ ""
+
+#Parse options and arguments
+try:
+ opts, args = getopt.getopt(sys.argv[1:], "hcsn",
+ ["help","cstruct","size","names"])
+except getopt.GetoptError:
+ usage()
+ sys.exit(2)
+
+#Check there is only struct name
+if not (len(args) == 1):
+ usage()
+ sys.exit(2)
+
+#Parse options
+##Print C struct
+printc = False
+##Print names
+printname = False
+##Print size
+printsize = False
+for opt,arg in opts:
+ if (opt in ("-h","--help")):
+ usage()
+ sys.exit(0)
+ elif (opt in ("-s","--size")):
+ printsize = True
+ elif (opt in ("-c","--cstruct")):
+ printc = True
+ elif (opt in ("-n","--names")):
+ printname = True
+ else:
+        assert False, "Unhandled option: "+opt
+
+pyopenflow = openflow.messages()
+cstruct = pyopenflow.structs[args[0].strip()]
+pattern = pyopenflow.get_pattern(cstruct)
+
+#Print C struct
+if (printc):
+ print cstruct
+
+#Print pattern
+print "Python pattern = "+str(pattern)
+
+#Print name
+if (printname):
+ print cstruct.get_names()
+
+#Print size
+if (printsize):
+ print "Size = "+str(pyopenflow.get_size(pattern))
+
diff --git a/tools/pylibopenflow/bin/pyopenflow-lavi-pythonize.py b/tools/pylibopenflow/bin/pyopenflow-lavi-pythonize.py
new file mode 100755
index 0000000..bb8c180
--- /dev/null
+++ b/tools/pylibopenflow/bin/pyopenflow-lavi-pythonize.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python2.5
+"""This script generate class files for messenger and lavi in NOX,
+specifically it creates a Python class for each data structure.
+
+(C) Copyright Stanford University
+Author ykk
+Date January 2010
+"""
+import sys
+import os.path
+import getopt
+import cheader
+import lavi.pythonize
+
+def usage():
+ """Display usage
+ """
+ print "Usage "+sys.argv[0]+" <options> nox_dir\n"+\
+ "Options:\n"+\
+ "-t/--template\n\tSpecify (non-default) template file\n"+\
+ "-n/--no-lavi\n\tSpecify that LAVI's file will not be created\n"+\
+ "-h/--help\n\tPrint this usage guide\n"+\
+ ""
+
+#Parse options and arguments
+try:
+    opts, args = getopt.getopt(sys.argv[1:], "ht:n",
+                               ["help","template=","no-lavi"])
+except getopt.GetoptError:
+ usage()
+ sys.exit(2)
+
+#Check there is only NOX directory given
+if not (len(args) == 1):
+ usage()
+ sys.exit(2)
+
+#Parse options
+##Output LAVI
+outputlavi=True
+##Template file
+templatefile="include/messenger.template.py"
+for opt,arg in opts:
+ if (opt in ("-h","--help")):
+ usage()
+ sys.exit(0)
+ elif (opt in ("-t","--template")):
+ templatefile=arg
+ elif (opt in ("-n","--no-lavi")):
+ outputlavi=False
+ else:
+ print "Unhandled option:"+opt
+ sys.exit(2)
+
+#Check for header file in NOX directory
+if not (os.path.isfile(args[0]+"/src/nox/coreapps/messenger/message.hh")):
+ print "Messenger header file not found!"
+ sys.exit(2)
+if (outputlavi):
+ if not (os.path.isfile(args[0]+"/src/nox/netapps/lavi/lavi-message.hh")):
+ print "LAVI message header file not found!"
+ sys.exit(2)
+
+#Get headerfile and pythonizer
+msgheader = cheader.cheaderfile(args[0]+"/src/nox/coreapps/messenger/message.hh")
+mpynizer = lavi.pythonize.msgpythonizer(msgheader)
+if (outputlavi):
+ laviheader = cheader.cheaderfile([args[0]+"/src/nox/coreapps/messenger/message.hh",
+ args[0]+"/src/nox/netapps/lavi/lavi-message.hh"])
+ lpynizer = lavi.pythonize.lavipythonizer(laviheader)
+
+#Generate Python code for messenger
+fileRef = open(args[0]+"/src/nox/coreapps/messenger/messenger.py", "w")
+for x in mpynizer.pycode(templatefile):
+ fileRef.write(x+"\n")
+fileRef.write("\n")
+fileRef.close()
+
+if (outputlavi):
+ fileRef = open(args[0]+"/src/nox/netapps/lavi/lavi.py", "w")
+ for x in lpynizer.pycode(templatefile):
+ fileRef.write(x.replace("def __init__(self,ipAddr,portNo=2603,debug=False):",
+ "def __init__(self,ipAddr,portNo=2503,debug=False):").\
+ replace("def __init__(self, ipAddr, portNo=1304,debug=False):",
+ "def __init__(self, ipAddr, portNo=1305,debug=False):")+\
+ "\n")
+ fileRef.write("\n")
+ fileRef.close()
diff --git a/tools/pylibopenflow/bin/pyopenflow-load-controller.py b/tools/pylibopenflow/bin/pyopenflow-load-controller.py
new file mode 100755
index 0000000..aef34f7
--- /dev/null
+++ b/tools/pylibopenflow/bin/pyopenflow-load-controller.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python2.5
+"""This script fakes as n OpenFlow switch and
+load the controller with k packets per second.
+
+(C) Copyright Stanford University
+Author ykk
+Date January 2010
+"""
+import sys
+import getopt
+import struct
+import openflow
+import time
+import output
+import of.msg
+import of.simu
+import of.network
+import dpkt.ethernet
+
+def usage():
+ """Display usage
+ """
+ print "Usage "+sys.argv[0]+" <options> controller\n"+\
+ "Options:\n"+\
+ "-p/--port\n\tSpecify port number\n"+\
+ "-v/--verbose\n\tPrint message exchange\n"+\
+ "-r/--rate\n\tSpecify rate per switch to send packets (default=1)\n"+\
+ "-d/--duration\n\tSpecify duration of load test in seconds (default=5)\n"+\
+ "-s/--switch\n\tSpecify number of switches (default=1)\n"+\
+ "-h/--help\n\tPrint this usage guide\n"+\
+ ""
+
+#Parse options and arguments
+try:
+ opts, args = getopt.getopt(sys.argv[1:], "hvp:s:d:r:",
+ ["help","verbose","port=",
+ "switch=","duration=","rate="])
+except getopt.GetoptError:
+ usage()
+ sys.exit(2)
+
+#Check there is only controller
+if not (len(args) == 1):
+ usage()
+ sys.exit(2)
+
+#Parse options
+##Duration
+duration = 5
+##Rate
+rate = 1.0
+##Switch number
+swno = 1
+##Port to connect to
+port = 6633
+##Set output mode
+output.set_mode("INFO")
+for opt,arg in opts:
+ if (opt in ("-h","--help")):
+ usage()
+ sys.exit(0)
+ elif (opt in ("-v","--verbose")):
+ output.set_mode("DBG")
+ elif (opt in ("-p","--port")):
+ port=int(arg)
+ elif (opt in ("-s","--switch")):
+ swno=int(arg)
+ elif (opt in ("-d","--duration")):
+ duration=int(arg)
+ elif (opt in ("-r","--rate")):
+ rate=float(arg)
+ else:
+ print "Unhandled option :"+opt
+ sys.exit(2)
+
+#Form packet
+pkt = dpkt.ethernet.Ethernet()
+pkt.type = dpkt.ethernet.ETH_MIN
+pkt.dst = '\xFF\xFF\xFF\xFF\xFF\xFF'
+
+#Connect to controller
+ofmsg = openflow.messages()
+parser = of.msg.parser(ofmsg)
+ofnet = of.simu.network()
+for i in range(1,swno+1):
+ ofsw = of.simu.switch(ofmsg, args[0], port,
+ dpid=i,
+ parser=parser)
+ ofnet.add_switch(ofsw)
+ ofsw.send_hello()
+
+output.info("Running "+str(swno)+" switches at "+str(rate)+\
+ " packets per seconds for "+str(duration)+" s")
+
+starttime = time.time()
+running = True
+interval = 1.0/(rate*swno)
+ntime=time.time()+(interval/10.0)
+swindex = 0
+pcount = 0
+rcount = 0
+while running:
+ ctime = time.time()
+ time.sleep(max(0,min(ntime-ctime,interval/10.0)))
+
+ if ((ctime-starttime) <= duration):
+ #Send packet if time's up
+ if (ctime >= ntime):
+ ntime += interval
+ pkt.src = struct.pack("Q",pcount)[:6]
+ ofnet.switches[swindex].send_packet(1,10,pkt.pack()+'A'*3)
+ pcount += 1
+            swindex += 1
+            if (swindex >= len(ofnet.switches)):
+                swindex = 0
+
+ #Process any received message
+ (ofsw, msg) = ofnet.connections.msgreceive()
+ while (msg != None):
+ dic = ofmsg.peek_from_front("ofp_header", msg)
+ if (dic["type"][0] == ofmsg.get_value("OFPT_FLOW_MOD")):
+ output.dbg("Received flow mod")
+ rcount += 1
+ ofsw.receive_openflow(msg)
+ (ofsw, msg) = ofnet.connections.msgreceive()
+ else:
+ running = False
+
+output.info("Sent "+str(pcount)+" packets at rate "+\
+ str(float(pcount)/float(duration))+" and received "+\
+ str(rcount)+" back")
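+
+#Example invocation (illustrative; assumes a controller on localhost:6633):
+#  ./pyopenflow-load-controller.py -s 4 -r 10 -d 5 127.0.0.1
+#runs 4 emulated switches, each sending 10 packets per second for 5 seconds.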
diff --git a/tools/pylibopenflow/bin/pyopenflow-ping-controller.py b/tools/pylibopenflow/bin/pyopenflow-ping-controller.py
new file mode 100755
index 0000000..e5aa030
--- /dev/null
+++ b/tools/pylibopenflow/bin/pyopenflow-ping-controller.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python2.5
+"""This script fakes as an OpenFlow switch to the controller
+
+(C) Copyright Stanford University
+Author ykk
+Date October 2009
+"""
+import sys
+import getopt
+import openflow
+import time
+import output
+import of.msg
+import of.simu
+
+def usage():
+ """Display usage
+ """
+ print "Usage "+sys.argv[0]+" <options> controller\n"+\
+ "Options:\n"+\
+ "-p/--port\n\tSpecify port number\n"+\
+ "-v/--verbose\n\tPrint message exchange\n"+\
+ "-h/--help\n\tPrint this usage guide\n"+\
+ ""
+
+#Parse options and arguments
+try:
+ opts, args = getopt.getopt(sys.argv[1:], "hvp:",
+ ["help","verbose","port="])
+except getopt.GetoptError:
+ usage()
+ sys.exit(2)
+
+#Check there is only controller
+if not (len(args) == 1):
+ usage()
+ sys.exit(2)
+
+#Parse options
+##Port to connect to
+port = 6633
+##Set output mode
+output.set_mode("INFO")
+for opt,arg in opts:
+ if (opt in ("-h","--help")):
+ usage()
+ sys.exit(0)
+ elif (opt in ("-v","--verbose")):
+ output.set_mode("DBG")
+ elif (opt in ("-p","--port")):
+ port=int(arg)
+ else:
+        assert False, "Unhandled option: "+opt
+
+#Connect to controller
+ofmsg = openflow.messages()
+parser = of.msg.parser(ofmsg)
+ofsw = of.simu.switch(ofmsg, args[0], port,
+ dpid=int("0xcafecafe",16),
+ parser=parser)
+ofsw.send_hello()
+#Send echo and wait
+xid = 22092009
+running = True
+ofsw.send_echo(xid)
+starttime = time.time()
+while running:
+ msg = ofsw.connection.msgreceive(True, 0.00001)
+ pkttime = time.time()
+ dic = ofmsg.peek_from_front("ofp_header", msg)
+ if (dic["type"][0] == ofmsg.get_value("OFPT_ECHO_REPLY") and
+ dic["xid"][0] == xid):
+ #Check reply for echo request
+ output.info("Received echo reply after "+\
+ str((pkttime-starttime)*1000)+" ms", "ping-controller")
+ running = False
+ else:
+ ofsw.receive_openflow(msg)
diff --git a/tools/pylibopenflow/bin/pyopenflow-pythonize.py b/tools/pylibopenflow/bin/pyopenflow-pythonize.py
new file mode 100755
index 0000000..466c35d
--- /dev/null
+++ b/tools/pylibopenflow/bin/pyopenflow-pythonize.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python2.5
+"""This script generate openflow-packets.py which
+creates Python class for each data structure in openflow.h.
+
+(C) Copyright Stanford University
+Author ykk
+Date December 2009
+"""
+import sys
+sys.path.append('./bin')
+sys.path.append('./pylib')
+import getopt
+import openflow
+import time
+import output
+import of.pythonize
+
+def usage():
+ """Display usage
+ """
+ print "Usage "+sys.argv[0]+" <options> output_file\n"+\
+ "Options:\n"+\
+ "-i/--input\n\tSpecify (non-default) OpenFlow header\n"+\
+ "-t/--template\n\tSpecify (non-default) template file\n"+\
+ "-h/--help\n\tPrint this usage guide\n"+\
+ ""
+
+#Parse options and arguments
+try:
+    opts, args = getopt.getopt(sys.argv[1:], "hi:t:",
+                               ["help","input=","template="])
+except getopt.GetoptError:
+ usage()
+ sys.exit(2)
+
+#Check there is only output file
+if not (len(args) == 1):
+ usage()
+ sys.exit(2)
+
+#Parse options
+##Input
+headerfile=None
+##Template file
+templatefile=None
+for opt,arg in opts:
+ if (opt in ("-h","--help")):
+ usage()
+ sys.exit(0)
+ elif (opt in ("-i","--input")):
+ headerfile=arg
+ elif (opt in ("-t","--template")):
+ templatefile=arg
+ else:
+ print "Unhandled option:"+opt
+ sys.exit(2)
+
+#Generate Python code
+ofmsg = openflow.messages(headerfile)
+pynizer = of.pythonize.pythonizer(ofmsg)
+
+fileRef = open(args[0], "w")
+for x in pynizer.pycode(templatefile):
+ fileRef.write(x+"\n")
+fileRef.write("\n")
+fileRef.close()
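+
+#Example invocation (mirrors the OFP_GEN_CMD rule in tools/munger/Makefile;
+#run from the pylibopenflow directory):
+#  bin/pyopenflow-pythonize.py -i include/openflow.h ofp.py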
diff --git a/tools/pylibopenflow/include/Put C header files here... b/tools/pylibopenflow/include/Put C header files here...
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/pylibopenflow/include/Put C header files here...
diff --git a/tools/pylibopenflow/include/messenger.template.py b/tools/pylibopenflow/include/messenger.template.py
new file mode 100644
index 0000000..25e7c76
--- /dev/null
+++ b/tools/pylibopenflow/include/messenger.template.py
@@ -0,0 +1,115 @@
+import socket
+import select
+import struct
+
+## This module provides library to send and receive messages to NOX's messenger
+#
+# This is a rewrite of noxmsg.py from OpenRoads (OpenFlow Wireless)
+#
+# @author ykk (Stanford University)
+# @date January, 2010
+# @see messenger
+
+def stringarray(string):
+ """Output array of binary values in string.
+ """
+ arrstr = ""
+ if (len(string) != 0):
+ for i in range(0,len(string)):
+ arrstr += "%x " % struct.unpack("=B",string[i])[0]
+ return arrstr
+
+def printarray(string):
+ """Print array of binary values
+ """
+ print "Array of length "+str(len(string))
+ print stringarray(string)
+
+class channel:
+ """TCP channel to communicate to NOX with.
+ """
+ def __init__(self,ipAddr,portNo=2603,debug=False):
+ """Initialize with socket
+ """
+ ##Socket reference for channel
+ self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sock.connect((ipAddr,portNo))
+ self.debug = debug
+ ##Internal buffer for receiving
+ self.__buffer = ""
+ ##Internal reference to header
+ self.__header = messenger_msg()
+
+ def baresend(self, msg):
+ """Send bare message"""
+ self.sock.send(msg)
+
+ def send(self,msg):
+ """Send message
+ """
+ msgh = messenger_msg()
+ remaining = msgh.unpack(msg)
+ if (msgh.length != len(msg)):
+ msgh.length = len(msg)
+ msg = msgh.pack()+remaining
+ self.baresend(msg)
+ if (self.debug):
+ printarray(msg)
+
+ def receive(self, recvLen=0,timeout=0):
+ """Receive command
+        If recvLen == None, nonblocking receive (return None or message)
+        With nonblocking receive, timeout is used for select statement
+
+        If recvLen is zero, return a single message
+ """
+ if (recvLen==0):
+ #Receive full message
+ msg=""
+ length=len(self.__header)
+ while (len(msg) < length):
+ msg+=self.sock.recv(1)
+ #Get length
+ if (len(msg) == length):
+ self.__header.unpack(msg)
+ length=self.__header.length
+ return msg
+ elif (recvLen==None):
+ #Non-blocking receive
+ ready_to_read = select.select([self.sock],[],[],timeout)[0]
+ if (ready_to_read):
+ self.__buffer += self.sock.recv(1)
+ if (len(self.__buffer) >= len(self.__header)):
+ self.__header.unpack(self.__buffer)
+ if (self.__header.length == len(self.__buffer)):
+ msg = self.__buffer
+ self.__buffer = ""
+ return msg
+ return None
+ else:
+ #Fixed length blocking receive
+ return self.sock.recv(recvLen)
+
+ def __del__(self):
+ """Terminate connection
+ """
+ emsg = messenger_msg()
+ emsg.type = MSG_DISCONNECT
+ emsg.length = len(emsg)
+ self.send(emsg.pack())
+ self.sock.shutdown(1)
+ self.sock.close()
+
+class sslChannel(channel):
+ """SSL channel to communicate to NOX with.
+ """
+ def __init__(self, ipAddr, portNo=1304,debug=False):
+ """Initialize with SSL sock
+ """
+        channel.__init__(self, ipAddr, portNo, debug)
+ ##Reference to SSL socket for channel
+ self.sslsock = socket.ssl(self.sock)
+
+ def baresend(self, msg):
+ """Send bare message"""
+ self.sslsock.write(msg)
+
diff --git a/tools/pylibopenflow/include/openflow.h b/tools/pylibopenflow/include/openflow.h
new file mode 100644
index 0000000..9e662aa
--- /dev/null
+++ b/tools/pylibopenflow/include/openflow.h
@@ -0,0 +1,970 @@
+/* Copyright (c) 2008 The Board of Trustees of The Leland Stanford
+ * Junior University
+ *
+ * We are making the OpenFlow specification and associated documentation
+ * (Software) available for public use and benefit with the expectation
+ * that others will use, modify and enhance the Software and contribute
+ * those enhancements back to the community. However, since we would
+ * like to make the Software available for broadest use, with as few
+ * restrictions as possible permission is hereby granted, free of
+ * charge, to any person obtaining a copy of this Software to deal in
+ * the Software under the copyrights without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * The name and trademarks of copyright holder(s) may NOT be used in
+ * advertising or publicity pertaining to the Software or any
+ * derivatives without specific, written prior permission.
+ */
+
+/* OpenFlow: protocol between controller and datapath. */
+
+#ifndef OPENFLOW_OPENFLOW_H
+#define OPENFLOW_OPENFLOW_H 1
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+#ifdef SWIG
+#define OFP_ASSERT(EXPR) /* SWIG can't handle OFP_ASSERT. */
+#elif !defined(__cplusplus)
+/* Build-time assertion for use in a declaration context. */
+#define OFP_ASSERT(EXPR) \
+ extern int (*build_assert(void))[ sizeof(struct { \
+ unsigned int build_assert_failed : (EXPR) ? 1 : -1; })]
+#else /* __cplusplus */
+#define OFP_ASSERT(_EXPR) typedef int build_assert_failed[(_EXPR) ? 1 : -1]
+#endif /* __cplusplus */
+
+#ifndef SWIG
+#define OFP_PACKED __attribute__((packed))
+#else
+#define OFP_PACKED /* SWIG doesn't understand __attribute. */
+#endif
+
+/* Version number:
+ * Non-experimental versions released: 0x01
+ * Experimental versions released: 0x81 -- 0x99
+ */
+/* The most significant bit being set in the version field indicates an
+ * experimental OpenFlow version.
+ */
+#define OFP_VERSION 0x99
+
+#define OFP_MAX_TABLE_NAME_LEN 32
+#define OFP_MAX_PORT_NAME_LEN 16
+
+#define OFP_TCP_PORT 6633
+#define OFP_SSL_PORT 6633
+
+#define OFP_ETH_ALEN 6 /* Bytes in an Ethernet address. */
+
+/* Port numbering. Physical ports are numbered starting from 1. */
+enum ofp_port {
+ /* Maximum number of physical switch ports. */
+ OFPP_MAX = 0xff00,
+
+ /* Fake output "ports". */
+ OFPP_IN_PORT = 0xfff8, /* Send the packet out the input port. This
+ virtual port must be explicitly used
+ in order to send back out of the input
+ port. */
+ OFPP_TABLE = 0xfff9, /* Perform actions in flow table.
+ NB: This can only be the destination
+ port for packet-out messages. */
+ OFPP_NORMAL = 0xfffa, /* Process with normal L2/L3 switching. */
+ OFPP_FLOOD = 0xfffb, /* All physical ports except input port and
+ those disabled by STP. */
+ OFPP_ALL = 0xfffc, /* All physical ports except input port. */
+ OFPP_CONTROLLER = 0xfffd, /* Send to controller. */
+ OFPP_LOCAL = 0xfffe, /* Local openflow "port". */
+ OFPP_NONE = 0xffff /* Not associated with a physical port. */
+};
+
+enum ofp_type {
+ /* Immutable messages. */
+ OFPT_HELLO, /* Symmetric message */
+ OFPT_ERROR, /* Symmetric message */
+ OFPT_ECHO_REQUEST, /* Symmetric message */
+ OFPT_ECHO_REPLY, /* Symmetric message */
+ OFPT_VENDOR, /* Symmetric message */
+
+ /* Switch configuration messages. */
+ OFPT_FEATURES_REQUEST, /* Controller/switch message */
+ OFPT_FEATURES_REPLY, /* Controller/switch message */
+ OFPT_GET_CONFIG_REQUEST, /* Controller/switch message */
+ OFPT_GET_CONFIG_REPLY, /* Controller/switch message */
+ OFPT_SET_CONFIG, /* Controller/switch message */
+
+ /* Asynchronous messages. */
+ OFPT_PACKET_IN, /* Async message */
+ OFPT_FLOW_REMOVED, /* Async message */
+ OFPT_PORT_STATUS, /* Async message */
+
+ /* Controller command messages. */
+ OFPT_PACKET_OUT, /* Controller/switch message */
+ OFPT_FLOW_MOD, /* Controller/switch message */
+ OFPT_PORT_MOD, /* Controller/switch message */
+
+ /* Statistics messages. */
+ OFPT_STATS_REQUEST, /* Controller/switch message */
+ OFPT_STATS_REPLY, /* Controller/switch message */
+
+ /* Barrier messages. */
+ OFPT_BARRIER_REQUEST, /* Controller/switch message */
+ OFPT_BARRIER_REPLY, /* Controller/switch message */
+
+ /* Queue Configuration messages. */
+ OFPT_QUEUE_GET_CONFIG_REQUEST, /* Controller/switch message */
+ OFPT_QUEUE_GET_CONFIG_REPLY /* Controller/switch message */
+
+};
+
+/* Header on all OpenFlow packets. */
+struct ofp_header {
+ uint8_t version; /* OFP_VERSION. */
+ uint8_t type; /* One of the OFPT_ constants. */
+ uint16_t length; /* Length including this ofp_header. */
+ uint32_t xid; /* Transaction id associated with this packet.
+ Replies use the same id as was in the request
+ to facilitate pairing. */
+};
+OFP_ASSERT(sizeof(struct ofp_header) == 8);
+
+/* OFPT_HELLO. This message has an empty body, but implementations must
+ * ignore any data included in the body, to allow for future extensions. */
+struct ofp_hello {
+ struct ofp_header header;
+};
+
+#define OFP_DEFAULT_MISS_SEND_LEN 128
+
+enum ofp_config_flags {
+ /* Handling of IP fragments. */
+ OFPC_FRAG_NORMAL = 0, /* No special handling for fragments. */
+ OFPC_FRAG_DROP = 1, /* Drop fragments. */
+ OFPC_FRAG_REASM = 2, /* Reassemble (only if OFPC_IP_REASM set). */
+ OFPC_FRAG_MASK = 3
+};
+
+/* Switch configuration. */
+struct ofp_switch_config {
+ struct ofp_header header;
+ uint16_t flags; /* OFPC_* flags. */
+ uint16_t miss_send_len; /* Max bytes of new flow that datapath should
+ send to the controller. */
+};
+OFP_ASSERT(sizeof(struct ofp_switch_config) == 12);
+
+/* Capabilities supported by the datapath. */
+enum ofp_capabilities {
+ OFPC_FLOW_STATS = 1 << 0, /* Flow statistics. */
+ OFPC_TABLE_STATS = 1 << 1, /* Table statistics. */
+ OFPC_PORT_STATS = 1 << 2, /* Port statistics. */
+ OFPC_STP = 1 << 3, /* 802.1d spanning tree. */
+ OFPC_RESERVED = 1 << 4, /* Reserved, must be zero. */
+ OFPC_IP_REASM = 1 << 5, /* Can reassemble IP fragments. */
+ OFPC_QUEUE_STATS = 1 << 6, /* Queue statistics. */
+ OFPC_ARP_MATCH_IP = 1 << 7 /* Match IP addresses in ARP pkts. */
+};
+
+/* Flags to indicate behavior of the physical port. These flags are
+ * used in ofp_phy_port to describe the current configuration. They are
+ * used in the ofp_port_mod message to configure the port's behavior.
+ */
+enum ofp_port_config {
+ OFPPC_PORT_DOWN = 1 << 0, /* Port is administratively down. */
+
+ OFPPC_NO_STP = 1 << 1, /* Disable 802.1D spanning tree on port. */
+ OFPPC_NO_RECV = 1 << 2, /* Drop all packets except 802.1D spanning
+ tree packets. */
+ OFPPC_NO_RECV_STP = 1 << 3, /* Drop received 802.1D STP packets. */
+ OFPPC_NO_FLOOD = 1 << 4, /* Do not include this port when flooding. */
+ OFPPC_NO_FWD = 1 << 5, /* Drop packets forwarded to port. */
+ OFPPC_NO_PACKET_IN = 1 << 6 /* Do not send packet-in msgs for port. */
+};
+
+/* Current state of the physical port. These are not configurable from
+ * the controller.
+ */
+enum ofp_port_state {
+ OFPPS_LINK_DOWN = 1 << 0, /* No physical link present. */
+
+ /* The OFPPS_STP_* bits have no effect on switch operation. The
+ * controller must adjust OFPPC_NO_RECV, OFPPC_NO_FWD, and
+ * OFPPC_NO_PACKET_IN appropriately to fully implement an 802.1D spanning
+ * tree. */
+ OFPPS_STP_LISTEN = 0 << 8, /* Not learning or relaying frames. */
+ OFPPS_STP_LEARN = 1 << 8, /* Learning but not relaying frames. */
+ OFPPS_STP_FORWARD = 2 << 8, /* Learning and relaying frames. */
+ OFPPS_STP_BLOCK = 3 << 8, /* Not part of spanning tree. */
+ OFPPS_STP_MASK = 3 << 8 /* Bit mask for OFPPS_STP_* values. */
+};
+
+/* Features of physical ports available in a datapath. */
+enum ofp_port_features {
+ OFPPF_10MB_HD = 1 << 0, /* 10 Mb half-duplex rate support. */
+ OFPPF_10MB_FD = 1 << 1, /* 10 Mb full-duplex rate support. */
+ OFPPF_100MB_HD = 1 << 2, /* 100 Mb half-duplex rate support. */
+ OFPPF_100MB_FD = 1 << 3, /* 100 Mb full-duplex rate support. */
+ OFPPF_1GB_HD = 1 << 4, /* 1 Gb half-duplex rate support. */
+ OFPPF_1GB_FD = 1 << 5, /* 1 Gb full-duplex rate support. */
+ OFPPF_10GB_FD = 1 << 6, /* 10 Gb full-duplex rate support. */
+ OFPPF_COPPER = 1 << 7, /* Copper medium. */
+ OFPPF_FIBER = 1 << 8, /* Fiber medium. */
+ OFPPF_AUTONEG = 1 << 9, /* Auto-negotiation. */
+ OFPPF_PAUSE = 1 << 10, /* Pause. */
+ OFPPF_PAUSE_ASYM = 1 << 11 /* Asymmetric pause. */
+};
+
+/* Description of a physical port */
+struct ofp_phy_port {
+ uint16_t port_no;
+ uint8_t hw_addr[OFP_ETH_ALEN];
+ char name[OFP_MAX_PORT_NAME_LEN]; /* Null-terminated */
+
+ uint32_t config; /* Bitmap of OFPPC_* flags. */
+ uint32_t state; /* Bitmap of OFPPS_* flags. */
+
+ /* Bitmaps of OFPPF_* that describe features. All bits zeroed if
+ * unsupported or unavailable. */
+ uint32_t curr; /* Current features. */
+ uint32_t advertised; /* Features being advertised by the port. */
+ uint32_t supported; /* Features supported by the port. */
+ uint32_t peer; /* Features advertised by peer. */
+};
+OFP_ASSERT(sizeof(struct ofp_phy_port) == 48);
+
+/* Switch features. */
+struct ofp_switch_features {
+ struct ofp_header header;
+ uint64_t datapath_id; /* Datapath unique ID. The lower 48-bits are for
+ a MAC address, while the upper 16-bits are
+ implementer-defined. */
+
+ uint32_t n_buffers; /* Max packets buffered at once. */
+
+ uint8_t n_tables; /* Number of tables supported by datapath. */
+ uint8_t pad[3]; /* Align to 64-bits. */
+
+ /* Features. */
+    uint32_t capabilities;  /* Bitmap of supported "ofp_capabilities". */
+ uint32_t actions; /* Bitmap of supported "ofp_action_type"s. */
+
+ /* Port info.*/
+ struct ofp_phy_port ports[0]; /* Port definitions. The number of ports
+ is inferred from the length field in
+ the header. */
+};
+OFP_ASSERT(sizeof(struct ofp_switch_features) == 32);
+
+/* What changed about the physical port */
+enum ofp_port_reason {
+ OFPPR_ADD, /* The port was added. */
+ OFPPR_DELETE, /* The port was removed. */
+ OFPPR_MODIFY /* Some attribute of the port has changed. */
+};
+
+/* A physical port has changed in the datapath */
+struct ofp_port_status {
+ struct ofp_header header;
+ uint8_t reason; /* One of OFPPR_*. */
+ uint8_t pad[7]; /* Align to 64-bits. */
+ struct ofp_phy_port desc;
+};
+OFP_ASSERT(sizeof(struct ofp_port_status) == 64);
+
+/* Modify behavior of the physical port */
+struct ofp_port_mod {
+ struct ofp_header header;
+ uint16_t port_no;
+ uint8_t hw_addr[OFP_ETH_ALEN]; /* The hardware address is not
+ configurable. This is used to
+ sanity-check the request, so it must
+ be the same as returned in an
+ ofp_phy_port struct. */
+
+ uint32_t config; /* Bitmap of OFPPC_* flags. */
+ uint32_t mask; /* Bitmap of OFPPC_* flags to be changed. */
+
+ uint32_t advertise; /* Bitmap of "ofp_port_features"s. Zero all
+ bits to prevent any action taking place. */
+ uint8_t pad[4]; /* Pad to 64-bits. */
+};
+OFP_ASSERT(sizeof(struct ofp_port_mod) == 32);
+
+/* Why is this packet being sent to the controller? */
+enum ofp_packet_in_reason {
+ OFPR_NO_MATCH, /* No matching flow. */
+ OFPR_ACTION /* Action explicitly output to controller. */
+};
+
+/* Packet received on port (datapath -> controller). */
+struct ofp_packet_in {
+ struct ofp_header header;
+ uint32_t buffer_id; /* ID assigned by datapath. */
+ uint16_t total_len; /* Full length of frame. */
+ uint16_t in_port; /* Port on which frame was received. */
+ uint8_t reason; /* Reason packet is being sent (one of OFPR_*) */
+ uint8_t pad;
+ uint8_t data[0]; /* Ethernet frame, halfway through 32-bit word,
+ so the IP header is 32-bit aligned. The
+ amount of data is inferred from the length
+ field in the header. Because of padding,
+ offsetof(struct ofp_packet_in, data) ==
+ sizeof(struct ofp_packet_in) - 2. */
+};
+OFP_ASSERT(sizeof(struct ofp_packet_in) == 20);
+
+enum ofp_action_type {
+ OFPAT_OUTPUT, /* Output to switch port. */
+ OFPAT_SET_VLAN_VID, /* Set the 802.1q VLAN id. */
+ OFPAT_SET_VLAN_PCP, /* Set the 802.1q priority. */
+ OFPAT_STRIP_VLAN, /* Strip the 802.1q header. */
+ OFPAT_SET_DL_SRC, /* Ethernet source address. */
+ OFPAT_SET_DL_DST, /* Ethernet destination address. */
+ OFPAT_SET_NW_SRC, /* IP source address. */
+ OFPAT_SET_NW_DST, /* IP destination address. */
+ OFPAT_SET_NW_TOS, /* IP ToS (DSCP field, 6 bits). */
+ OFPAT_SET_TP_SRC, /* TCP/UDP source port. */
+ OFPAT_SET_TP_DST, /* TCP/UDP destination port. */
+ OFPAT_ENQUEUE, /* Output to queue. */
+ OFPAT_VENDOR = 0xffff
+};
+
+/* Action structure for OFPAT_OUTPUT, which sends packets out 'port'.
+ * When the 'port' is the OFPP_CONTROLLER, 'max_len' indicates the max
+ * number of bytes to send. A 'max_len' of zero means no bytes of the
+ * packet should be sent.*/
+struct ofp_action_output {
+ uint16_t type; /* OFPAT_OUTPUT. */
+ uint16_t len; /* Length is 8. */
+ uint16_t port; /* Output port. */
+ uint16_t max_len; /* Max length to send to controller. */
+};
+OFP_ASSERT(sizeof(struct ofp_action_output) == 8);
+
+/* The VLAN id is 12 bits, so we can use the entire 16 bits to indicate
+ * special conditions. All ones is used to match that no VLAN id was
+ * set. */
+#define OFP_VLAN_NONE 0xffff
+
+/* Action structure for OFPAT_SET_VLAN_VID. */
+struct ofp_action_vlan_vid {
+ uint16_t type; /* OFPAT_SET_VLAN_VID. */
+ uint16_t len; /* Length is 8. */
+ uint16_t vlan_vid; /* VLAN id. */
+ uint8_t pad[2];
+};
+OFP_ASSERT(sizeof(struct ofp_action_vlan_vid) == 8);
+
+/* Action structure for OFPAT_SET_VLAN_PCP. */
+struct ofp_action_vlan_pcp {
+ uint16_t type; /* OFPAT_SET_VLAN_PCP. */
+ uint16_t len; /* Length is 8. */
+ uint8_t vlan_pcp; /* VLAN priority. */
+ uint8_t pad[3];
+};
+OFP_ASSERT(sizeof(struct ofp_action_vlan_pcp) == 8);
+
+/* Action structure for OFPAT_SET_DL_SRC/DST. */
+struct ofp_action_dl_addr {
+ uint16_t type; /* OFPAT_SET_DL_SRC/DST. */
+ uint16_t len; /* Length is 16. */
+ uint8_t dl_addr[OFP_ETH_ALEN]; /* Ethernet address. */
+ uint8_t pad[6];
+};
+OFP_ASSERT(sizeof(struct ofp_action_dl_addr) == 16);
+
+/* Action structure for OFPAT_SET_NW_SRC/DST. */
+struct ofp_action_nw_addr {
+    uint16_t type;                  /* OFPAT_SET_NW_SRC/DST. */
+ uint16_t len; /* Length is 8. */
+ uint32_t nw_addr; /* IP address. */
+};
+OFP_ASSERT(sizeof(struct ofp_action_nw_addr) == 8);
+
+/* Action structure for OFPAT_SET_TP_SRC/DST. */
+struct ofp_action_tp_port {
+ uint16_t type; /* OFPAT_SET_TP_SRC/DST. */
+ uint16_t len; /* Length is 8. */
+ uint16_t tp_port; /* TCP/UDP port. */
+ uint8_t pad[2];
+};
+OFP_ASSERT(sizeof(struct ofp_action_tp_port) == 8);
+
+/* Action structure for OFPAT_SET_NW_TOS. */
+struct ofp_action_nw_tos {
+    uint16_t type;                  /* OFPAT_SET_NW_TOS. */
+ uint16_t len; /* Length is 8. */
+ uint8_t nw_tos; /* IP ToS (DSCP field, 6 bits). */
+ uint8_t pad[3];
+};
+OFP_ASSERT(sizeof(struct ofp_action_nw_tos) == 8);
+
+/* Action header for OFPAT_VENDOR. The rest of the body is vendor-defined. */
+struct ofp_action_vendor_header {
+ uint16_t type; /* OFPAT_VENDOR. */
+ uint16_t len; /* Length is a multiple of 8. */
+ uint32_t vendor; /* Vendor ID, which takes the same form
+ as in "struct ofp_vendor_header". */
+};
+OFP_ASSERT(sizeof(struct ofp_action_vendor_header) == 8);
+
+/* Action header that is common to all actions. The length includes the
+ * header and any padding used to make the action 64-bit aligned.
+ * NB: The length of an action *must* always be a multiple of eight. */
+struct ofp_action_header {
+ uint16_t type; /* One of OFPAT_*. */
+ uint16_t len; /* Length of action, including this
+ header. This is the length of action,
+ including any padding to make it
+ 64-bit aligned. */
+ uint8_t pad[4];
+};
+OFP_ASSERT(sizeof(struct ofp_action_header) == 8);
+
+/* Send packet (controller -> datapath). */
+struct ofp_packet_out {
+ struct ofp_header header;
+ uint32_t buffer_id; /* ID assigned by datapath (-1 if none). */
+ uint16_t in_port; /* Packet's input port (OFPP_NONE if none). */
+ uint16_t actions_len; /* Size of action array in bytes. */
+ struct ofp_action_header actions[0]; /* Actions. */
+ /* uint8_t data[0]; */ /* Packet data. The length is inferred
+ from the length field in the header.
+ (Only meaningful if buffer_id == -1.) */
+};
+OFP_ASSERT(sizeof(struct ofp_packet_out) == 16);
+
+enum ofp_flow_mod_command {
+ OFPFC_ADD, /* New flow. */
+ OFPFC_MODIFY, /* Modify all matching flows. */
+ OFPFC_MODIFY_STRICT, /* Modify entry strictly matching wildcards */
+ OFPFC_DELETE, /* Delete all matching flows. */
+ OFPFC_DELETE_STRICT /* Strictly match wildcards and priority. */
+};
+
+/* Flow wildcards. */
+enum ofp_flow_wildcards {
+ OFPFW_IN_PORT = 1 << 0, /* Switch input port. */
+ OFPFW_DL_VLAN = 1 << 1, /* VLAN id. */
+ OFPFW_DL_SRC = 1 << 2, /* Ethernet source address. */
+ OFPFW_DL_DST = 1 << 3, /* Ethernet destination address. */
+ OFPFW_DL_TYPE = 1 << 4, /* Ethernet frame type. */
+ OFPFW_NW_PROTO = 1 << 5, /* IP protocol. */
+ OFPFW_TP_SRC = 1 << 6, /* TCP/UDP source port. */
+ OFPFW_TP_DST = 1 << 7, /* TCP/UDP destination port. */
+
+ /* IP source address wildcard bit count. 0 is exact match, 1 ignores the
+ * LSB, 2 ignores the 2 least-significant bits, ..., 32 and higher wildcard
+ * the entire field. This is the *opposite* of the usual convention where
+ * e.g. /24 indicates that 8 bits (not 24 bits) are wildcarded. */
+ OFPFW_NW_SRC_SHIFT = 8,
+ OFPFW_NW_SRC_BITS = 6,
+ OFPFW_NW_SRC_MASK = ((1 << OFPFW_NW_SRC_BITS) - 1) << OFPFW_NW_SRC_SHIFT,
+ OFPFW_NW_SRC_ALL = 32 << OFPFW_NW_SRC_SHIFT,
+
+ /* IP destination address wildcard bit count. Same format as source. */
+ OFPFW_NW_DST_SHIFT = 14,
+ OFPFW_NW_DST_BITS = 6,
+ OFPFW_NW_DST_MASK = ((1 << OFPFW_NW_DST_BITS) - 1) << OFPFW_NW_DST_SHIFT,
+ OFPFW_NW_DST_ALL = 32 << OFPFW_NW_DST_SHIFT,
+
+ OFPFW_DL_VLAN_PCP = 1 << 20, /* VLAN priority. */
+ OFPFW_NW_TOS = 1 << 21, /* IP ToS (DSCP field, 6 bits). */
+
+ /* Wildcard all fields. */
+ OFPFW_ALL = ((1 << 22) - 1)
+};
+
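+/* Informative note on the wildcard bit counts above: the count is the number
+ * of low-order address bits to ignore.  For example, to match a /24 prefix on
+ * nw_src, use a count of 8 (ignore the 8 low bits), not 24. */
+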
+/* The wildcards for ICMP type and code fields use the transport source
+ * and destination port fields, respectively. */
+#define OFPFW_ICMP_TYPE OFPFW_TP_SRC
+#define OFPFW_ICMP_CODE OFPFW_TP_DST
+
+/* Values below this cutoff are 802.3 packets and the two bytes
+ * following MAC addresses are used as a frame length. Otherwise, the
+ * two bytes are used as the Ethernet type.
+ */
+#define OFP_DL_TYPE_ETH2_CUTOFF 0x0600
+
+/* Value of dl_type to indicate that the frame does not include an
+ * Ethernet type.
+ */
+#define OFP_DL_TYPE_NOT_ETH_TYPE 0x05ff
+
+/* The VLAN id is 12-bits, so we can use the entire 16 bits to indicate
+ * special conditions. All ones indicates that no VLAN id was set.
+ */
+#define OFP_VLAN_NONE 0xffff
+
+/* Fields to match against flows */
+struct ofp_match {
+ uint32_t wildcards; /* Wildcard fields. */
+ uint16_t in_port; /* Input switch port. */
+ uint8_t dl_src[OFP_ETH_ALEN]; /* Ethernet source address. */
+ uint8_t dl_dst[OFP_ETH_ALEN]; /* Ethernet destination address. */
+ uint16_t dl_vlan; /* Input VLAN id. */
+ uint8_t dl_vlan_pcp; /* Input VLAN priority. */
+ uint8_t pad1[1]; /* Align to 64-bits */
+ uint16_t dl_type; /* Ethernet frame type. */
+ uint8_t nw_tos; /* IP ToS (actually DSCP field, 6 bits). */
+ uint8_t nw_proto; /* IP protocol or lower 8 bits of
+ * ARP opcode. */
+ uint8_t pad2[2]; /* Align to 64-bits */
+ uint32_t nw_src; /* IP source address. */
+ uint32_t nw_dst; /* IP destination address. */
+ uint16_t tp_src; /* TCP/UDP source port. */
+ uint16_t tp_dst; /* TCP/UDP destination port. */
+};
+OFP_ASSERT(sizeof(struct ofp_match) == 40);
+
+/* The match fields for ICMP type and code use the transport source and
+ * destination port fields, respectively. */
+#define icmp_type tp_src
+#define icmp_code tp_dst
+
+/* Value used in "idle_timeout" and "hard_timeout" to indicate that the entry
+ * is permanent. */
+#define OFP_FLOW_PERMANENT 0
+
+/* By default, choose a priority in the middle. */
+#define OFP_DEFAULT_PRIORITY 0x8000
+
+enum ofp_flow_mod_flags {
+ OFPFF_SEND_FLOW_REM = 1 << 0, /* Send flow removed message when flow
+ * expires or is deleted. */
+ OFPFF_CHECK_OVERLAP = 1 << 1, /* Check for overlapping entries first. */
+ OFPFF_EMERG = 1 << 2 /* Remark this is for emergency. */
+};
+
+/* Flow setup and teardown (controller -> datapath). */
+struct ofp_flow_mod {
+ struct ofp_header header;
+ struct ofp_match match; /* Fields to match */
+ uint64_t cookie; /* Opaque controller-issued identifier. */
+
+ /* Flow actions. */
+ uint16_t command; /* One of OFPFC_*. */
+ uint16_t idle_timeout; /* Idle time before discarding (seconds). */
+ uint16_t hard_timeout; /* Max time before discarding (seconds). */
+ uint16_t priority; /* Priority level of flow entry. */
+ uint32_t buffer_id; /* Buffered packet to apply to (or -1).
+ Not meaningful for OFPFC_DELETE*. */
+ uint16_t out_port; /* For OFPFC_DELETE* commands, require
+ matching entries to include this as an
+ output port. A value of OFPP_NONE
+ indicates no restriction. */
+ uint16_t flags; /* One of OFPFF_*. */
+ struct ofp_action_header actions[0]; /* The action length is inferred
+ from the length field in the
+ header. */
+};
+OFP_ASSERT(sizeof(struct ofp_flow_mod) == 72);
+
+/* Why was this flow removed? */
+enum ofp_flow_removed_reason {
+ OFPRR_IDLE_TIMEOUT, /* Flow idle time exceeded idle_timeout. */
+ OFPRR_HARD_TIMEOUT, /* Time exceeded hard_timeout. */
+ OFPRR_DELETE /* Evicted by a DELETE flow mod. */
+};
+
+/* Flow removed (datapath -> controller). */
+struct ofp_flow_removed {
+ struct ofp_header header;
+ struct ofp_match match; /* Description of fields. */
+ uint64_t cookie; /* Opaque controller-issued identifier. */
+
+ uint16_t priority; /* Priority level of flow entry. */
+ uint8_t reason; /* One of OFPRR_*. */
+ uint8_t pad[1]; /* Align to 32-bits. */
+
+ uint32_t duration_sec; /* Time flow was alive in seconds. */
+ uint32_t duration_nsec; /* Time flow was alive in nanoseconds beyond
+ duration_sec. */
+ uint16_t idle_timeout; /* Idle timeout from original flow mod. */
+ uint8_t pad2[2]; /* Align to 64-bits. */
+ uint64_t packet_count;
+ uint64_t byte_count;
+};
+OFP_ASSERT(sizeof(struct ofp_flow_removed) == 88);
+
+/* Values for 'type' in ofp_error_message. These values are immutable: they
+ * will not change in future versions of the protocol (although new values may
+ * be added). */
+enum ofp_error_type {
+ OFPET_HELLO_FAILED, /* Hello protocol failed. */
+ OFPET_BAD_REQUEST, /* Request was not understood. */
+ OFPET_BAD_ACTION, /* Error in action description. */
+ OFPET_FLOW_MOD_FAILED, /* Problem modifying flow entry. */
+ OFPET_PORT_MOD_FAILED, /* Port mod request failed. */
+ OFPET_QUEUE_OP_FAILED /* Queue operation failed. */
+};
+
+/* ofp_error_msg 'code' values for OFPET_HELLO_FAILED. 'data' contains an
+ * ASCII text string that may give failure details. */
+enum ofp_hello_failed_code {
+ OFPHFC_INCOMPATIBLE, /* No compatible version. */
+ OFPHFC_EPERM /* Permissions error. */
+};
+
+/* ofp_error_msg 'code' values for OFPET_BAD_REQUEST. 'data' contains at least
+ * the first 64 bytes of the failed request. */
+enum ofp_bad_request_code {
+ OFPBRC_BAD_VERSION, /* ofp_header.version not supported. */
+ OFPBRC_BAD_TYPE, /* ofp_header.type not supported. */
+ OFPBRC_BAD_STAT, /* ofp_stats_request.type not supported. */
+ OFPBRC_BAD_VENDOR, /* Vendor not supported (in ofp_vendor_header
+ * or ofp_stats_request or ofp_stats_reply). */
+ OFPBRC_BAD_SUBTYPE, /* Vendor subtype not supported. */
+ OFPBRC_EPERM, /* Permissions error. */
+ OFPBRC_BAD_LEN, /* Wrong request length for type. */
+ OFPBRC_BUFFER_EMPTY, /* Specified buffer has already been used. */
+ OFPBRC_BUFFER_UNKNOWN /* Specified buffer does not exist. */
+};
+
+/* ofp_error_msg 'code' values for OFPET_BAD_ACTION. 'data' contains at least
+ * the first 64 bytes of the failed request. */
+enum ofp_bad_action_code {
+ OFPBAC_BAD_TYPE, /* Unknown action type. */
+ OFPBAC_BAD_LEN, /* Length problem in actions. */
+ OFPBAC_BAD_VENDOR, /* Unknown vendor id specified. */
+ OFPBAC_BAD_VENDOR_TYPE, /* Unknown action type for vendor id. */
+ OFPBAC_BAD_OUT_PORT, /* Problem validating output action. */
+ OFPBAC_BAD_ARGUMENT, /* Bad action argument. */
+ OFPBAC_EPERM, /* Permissions error. */
+ OFPBAC_TOO_MANY, /* Can't handle this many actions. */
+ OFPBAC_BAD_QUEUE /* Problem validating output queue. */
+};
+
+/* ofp_error_msg 'code' values for OFPET_FLOW_MOD_FAILED. 'data' contains
+ * at least the first 64 bytes of the failed request. */
+enum ofp_flow_mod_failed_code {
+ OFPFMFC_ALL_TABLES_FULL, /* Flow not added because of full tables. */
+ OFPFMFC_OVERLAP, /* Attempted to add overlapping flow with
+ * CHECK_OVERLAP flag set. */
+ OFPFMFC_EPERM, /* Permissions error. */
+ OFPFMFC_BAD_EMERG_TIMEOUT, /* Flow not added because of non-zero idle/hard
+ * timeout. */
+ OFPFMFC_BAD_COMMAND, /* Unknown command. */
+ OFPFMFC_UNSUPPORTED /* Unsupported action list - cannot process in
+ * the order specified. */
+};
+
+/* ofp_error_msg 'code' values for OFPET_PORT_MOD_FAILED. 'data' contains
+ * at least the first 64 bytes of the failed request. */
+enum ofp_port_mod_failed_code {
+ OFPPMFC_BAD_PORT, /* Specified port does not exist. */
+ OFPPMFC_BAD_HW_ADDR, /* Specified hardware address is wrong. */
+};
+
+/* ofp_error msg 'code' values for OFPET_QUEUE_OP_FAILED. 'data' contains
+ * at least the first 64 bytes of the failed request */
+enum ofp_queue_op_failed_code {
+ OFPQOFC_BAD_PORT, /* Invalid port (or port does not exist). */
+ OFPQOFC_BAD_QUEUE, /* Queue does not exist. */
+ OFPQOFC_EPERM /* Permissions error. */
+};
+
+/* OFPT_ERROR: Error message (datapath -> controller). */
+struct ofp_error_msg {
+ struct ofp_header header;
+
+ uint16_t type;
+ uint16_t code;
+ uint8_t data[0]; /* Variable-length data. Interpreted based
+ on the type and code. */
+};
+OFP_ASSERT(sizeof(struct ofp_error_msg) == 12);
+
+enum ofp_stats_types {
+ /* Description of this OpenFlow switch.
+ * The request body is empty.
+ * The reply body is struct ofp_desc_stats. */
+ OFPST_DESC,
+
+ /* Individual flow statistics.
+ * The request body is struct ofp_flow_stats_request.
+ * The reply body is an array of struct ofp_flow_stats. */
+ OFPST_FLOW,
+
+ /* Aggregate flow statistics.
+ * The request body is struct ofp_aggregate_stats_request.
+ * The reply body is struct ofp_aggregate_stats_reply. */
+ OFPST_AGGREGATE,
+
+ /* Flow table statistics.
+ * The request body is empty.
+ * The reply body is an array of struct ofp_table_stats. */
+ OFPST_TABLE,
+
+ /* Physical port statistics.
+ * The request body is struct ofp_port_stats_request.
+ * The reply body is an array of struct ofp_port_stats. */
+ OFPST_PORT,
+
+ /* Queue statistics for a port
+ * The request body defines the port
+ * The reply body is an array of struct ofp_queue_stats */
+ OFPST_QUEUE,
+
+ /* Vendor extension.
+ * The request and reply bodies begin with a 32-bit vendor ID, which takes
+ * the same form as in "struct ofp_vendor_header". The request and reply
+ * bodies are otherwise vendor-defined. */
+ OFPST_VENDOR = 0xffff
+};
+
+struct ofp_stats_request {
+ struct ofp_header header;
+ uint16_t type; /* One of the OFPST_* constants. */
+ uint16_t flags; /* OFPSF_REQ_* flags (none yet defined). */
+ uint8_t body[0]; /* Body of the request. */
+};
+OFP_ASSERT(sizeof(struct ofp_stats_request) == 12);
+
+enum ofp_stats_reply_flags {
+ OFPSF_REPLY_MORE = 1 << 0 /* More replies to follow. */
+};
+
+struct ofp_stats_reply {
+ struct ofp_header header;
+ uint16_t type; /* One of the OFPST_* constants. */
+ uint16_t flags; /* OFPSF_REPLY_* flags. */
+ uint8_t body[0]; /* Body of the reply. */
+};
+OFP_ASSERT(sizeof(struct ofp_stats_reply) == 12);
+
+#define DESC_STR_LEN 256
+#define SERIAL_NUM_LEN 32
+/* Body of reply to OFPST_DESC request. Each entry is a NULL-terminated
+ * ASCII string. */
+struct ofp_desc_stats {
+ char mfr_desc[DESC_STR_LEN]; /* Manufacturer description. */
+ char hw_desc[DESC_STR_LEN]; /* Hardware description. */
+ char sw_desc[DESC_STR_LEN]; /* Software description. */
+ char serial_num[SERIAL_NUM_LEN]; /* Serial number. */
+ char dp_desc[DESC_STR_LEN]; /* Human readable description of datapath. */
+};
+OFP_ASSERT(sizeof(struct ofp_desc_stats) == 1056);
+
+/* Body for ofp_stats_request of type OFPST_FLOW. */
+struct ofp_flow_stats_request {
+ struct ofp_match match; /* Fields to match. */
+ uint8_t table_id; /* ID of table to read (from ofp_table_stats),
+ 0xff for all tables or 0xfe for emergency. */
+ uint8_t pad; /* Align to 32 bits. */
+ uint16_t out_port; /* Require matching entries to include this
+ as an output port. A value of OFPP_NONE
+ indicates no restriction. */
+};
+OFP_ASSERT(sizeof(struct ofp_flow_stats_request) == 44);
+
+/* Body of reply to OFPST_FLOW request. */
+struct ofp_flow_stats {
+ uint16_t length; /* Length of this entry. */
+ uint8_t table_id; /* ID of table flow came from. */
+ uint8_t pad;
+ struct ofp_match match; /* Description of fields. */
+ uint32_t duration_sec; /* Time flow has been alive in seconds. */
+ uint32_t duration_nsec; /* Time flow has been alive in nanoseconds beyond
+ duration_sec. */
+ uint16_t priority; /* Priority of the entry. Only meaningful
+ when this is not an exact-match entry. */
+ uint16_t idle_timeout; /* Number of seconds idle before expiration. */
+ uint16_t hard_timeout; /* Number of seconds before expiration. */
+ uint8_t pad2[6]; /* Align to 64-bits. */
+ uint64_t cookie; /* Opaque controller-issued identifier. */
+ uint64_t packet_count; /* Number of packets in flow. */
+ uint64_t byte_count; /* Number of bytes in flow. */
+ struct ofp_action_header actions[0]; /* Actions. */
+};
+OFP_ASSERT(sizeof(struct ofp_flow_stats) == 88);
+
+/* Body for ofp_stats_request of type OFPST_AGGREGATE. */
+struct ofp_aggregate_stats_request {
+ struct ofp_match match; /* Fields to match. */
+ uint8_t table_id; /* ID of table to read (from ofp_table_stats)
+ 0xff for all tables or 0xfe for emergency. */
+ uint8_t pad; /* Align to 32 bits. */
+ uint16_t out_port; /* Require matching entries to include this
+ as an output port. A value of OFPP_NONE
+ indicates no restriction. */
+};
+OFP_ASSERT(sizeof(struct ofp_aggregate_stats_request) == 44);
+
+/* Body of reply to OFPST_AGGREGATE request. */
+struct ofp_aggregate_stats_reply {
+ uint64_t packet_count; /* Number of packets in flows. */
+ uint64_t byte_count; /* Number of bytes in flows. */
+ uint32_t flow_count; /* Number of flows. */
+ uint8_t pad[4]; /* Align to 64 bits. */
+};
+OFP_ASSERT(sizeof(struct ofp_aggregate_stats_reply) == 24);
+
+/* Body of reply to OFPST_TABLE request. */
+struct ofp_table_stats {
+ uint8_t table_id; /* Identifier of table. Lower numbered tables
+ are consulted first. */
+ uint8_t pad[3]; /* Align to 32-bits. */
+ char name[OFP_MAX_TABLE_NAME_LEN];
+ uint32_t wildcards; /* Bitmap of OFPFW_* wildcards that are
+ supported by the table. */
+ uint32_t max_entries; /* Max number of entries supported. */
+ uint32_t active_count; /* Number of active entries. */
+ uint64_t lookup_count; /* Number of packets looked up in table. */
+ uint64_t matched_count; /* Number of packets that hit table. */
+};
+OFP_ASSERT(sizeof(struct ofp_table_stats) == 64);
+
+/* Body for ofp_stats_request of type OFPST_PORT. */
+struct ofp_port_stats_request {
+ uint16_t port_no; /* OFPST_PORT message must request statistics
+ * either for a single port (specified in
+ * port_no) or for all ports (if port_no ==
+ * OFPP_NONE). */
+ uint8_t pad[6];
+};
+OFP_ASSERT(sizeof(struct ofp_port_stats_request) == 8);
+
+/* Body of reply to OFPST_PORT request. If a counter is unsupported, set
+ * the field to all ones. */
+struct ofp_port_stats {
+ uint16_t port_no;
+ uint8_t pad[6]; /* Align to 64-bits. */
+ uint64_t rx_packets; /* Number of received packets. */
+ uint64_t tx_packets; /* Number of transmitted packets. */
+ uint64_t rx_bytes; /* Number of received bytes. */
+ uint64_t tx_bytes; /* Number of transmitted bytes. */
+ uint64_t rx_dropped; /* Number of packets dropped by RX. */
+ uint64_t tx_dropped; /* Number of packets dropped by TX. */
+ uint64_t rx_errors; /* Number of receive errors. This is a super-set
+ of more specific receive errors and should be
+ greater than or equal to the sum of all
+ rx_*_err values. */
+ uint64_t tx_errors; /* Number of transmit errors. This is a super-set
+ of more specific transmit errors and should be
+ greater than or equal to the sum of all
+ tx_*_err values (none currently defined.) */
+ uint64_t rx_frame_err; /* Number of frame alignment errors. */
+ uint64_t rx_over_err; /* Number of packets with RX overrun. */
+ uint64_t rx_crc_err; /* Number of CRC errors. */
+ uint64_t collisions; /* Number of collisions. */
+};
+OFP_ASSERT(sizeof(struct ofp_port_stats) == 104);
+
+/* Vendor extension. */
+struct ofp_vendor_header {
+ struct ofp_header header; /* Type OFPT_VENDOR. */
+ uint32_t vendor; /* Vendor ID:
+ * - MSB 0: low-order bytes are IEEE OUI.
+ * - MSB != 0: defined by OpenFlow
+ * consortium. */
+ /* Vendor-defined arbitrary additional data. */
+};
+OFP_ASSERT(sizeof(struct ofp_vendor_header) == 12);
+
+/* All ones is used to indicate all queues in a port (for stats retrieval). */
+#define OFPQ_ALL 0xffffffff
+
+/* Min rate > 1000 means not configured. */
+#define OFPQ_MIN_RATE_UNCFG 0xffff
+
+enum ofp_queue_properties {
+ OFPQT_NONE = 0, /* No property defined for queue (default). */
+ OFPQT_MIN_RATE, /* Minimum datarate guaranteed. */
+ /* Other types should be added here
+ * (i.e. max rate, precedence, etc). */
+};
+
+/* Common description for a queue. */
+struct ofp_queue_prop_header {
+ uint16_t property; /* One of OFPQT_. */
+ uint16_t len; /* Length of property, including this header. */
+    uint8_t pad[4];     /* 64-bit alignment. */
+};
+OFP_ASSERT(sizeof(struct ofp_queue_prop_header) == 8);
+
+/* Min-Rate queue property description. */
+struct ofp_queue_prop_min_rate {
+ struct ofp_queue_prop_header prop_header; /* prop: OFPQT_MIN, len: 16. */
+ uint16_t rate; /* In 1/10 of a percent; >1000 -> disabled. */
+ uint8_t pad[6]; /* 64-bit alignment */
+};
+OFP_ASSERT(sizeof(struct ofp_queue_prop_min_rate) == 16);
+
+/* Full description for a queue. */
+struct ofp_packet_queue {
+ uint32_t queue_id; /* id for the specific queue. */
+ uint16_t len; /* Length in bytes of this queue desc. */
+ uint8_t pad[2]; /* 64-bit alignment. */
+ struct ofp_queue_prop_header properties[0]; /* List of properties. */
+};
+OFP_ASSERT(sizeof(struct ofp_packet_queue) == 8);
+
+/* Query for port queue configuration. */
+struct ofp_queue_get_config_request {
+ struct ofp_header header;
+ uint16_t port; /* Port to be queried. Should refer
+ to a valid physical port (i.e. < OFPP_MAX) */
+ uint8_t pad[2]; /* 32-bit alignment. */
+};
+OFP_ASSERT(sizeof(struct ofp_queue_get_config_request) == 12);
+
+/* Queue configuration for a given port. */
+struct ofp_queue_get_config_reply {
+ struct ofp_header header;
+ uint16_t port;
+ uint8_t pad[6];
+ struct ofp_packet_queue queues[0]; /* List of configured queues. */
+};
+OFP_ASSERT(sizeof(struct ofp_queue_get_config_reply) == 16);
+
+/* OFPAT_ENQUEUE action struct: send packets to given queue on port. */
+struct ofp_action_enqueue {
+ uint16_t type; /* OFPAT_ENQUEUE. */
+ uint16_t len; /* Len is 16. */
+    uint16_t port;           /* Port to which the queue belongs. Should
+ refer to a valid physical port
+ (i.e. < OFPP_MAX) or OFPP_IN_PORT. */
+ uint8_t pad[6]; /* Pad for 64-bit alignment. */
+ uint32_t queue_id; /* Where to enqueue the packets. */
+};
+OFP_ASSERT(sizeof(struct ofp_action_enqueue) == 16);
+
+struct ofp_queue_stats_request {
+    uint16_t port_no;        /* All ports if OFPP_ALL. */
+ uint8_t pad[2]; /* Align to 32-bits. */
+ uint32_t queue_id; /* All queues if OFPQ_ALL. */
+};
+OFP_ASSERT(sizeof(struct ofp_queue_stats_request) == 8);
+
+struct ofp_queue_stats {
+ uint16_t port_no;
+ uint8_t pad[2]; /* Align to 32-bits. */
+    uint32_t queue_id;       /* Queue id. */
+ uint64_t tx_bytes; /* Number of transmitted bytes. */
+ uint64_t tx_packets; /* Number of transmitted packets. */
+ uint64_t tx_errors; /* Number of packets dropped due to overrun. */
+};
+OFP_ASSERT(sizeof(struct ofp_queue_stats) == 32);
+
+#endif /* openflow/openflow.h */
diff --git a/tools/pylibopenflow/include/pyopenflow.template.py b/tools/pylibopenflow/include/pyopenflow.template.py
new file mode 100644
index 0000000..29b59f4
--- /dev/null
+++ b/tools/pylibopenflow/include/pyopenflow.template.py
@@ -0,0 +1,21 @@
+import socket
+
+class ofsocket:
+    """OpenFlow socket
+ """
+ def __init__(self, socket):
+ """Initialize with socket
+ """
+ ##Reference to socket
+ self.socket = socket
+
+ def send(self, msg):
+ """Send message
+ """
+ ofph = ofp_header()
+ remaining = ofph.unpack(msg)
+ if (ofph.length != len(msg)):
+ ofph.length = len(msg)
+ msg = ofph.pack()+remaining
+ self.socket.send(msg)
+
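+# Rough usage sketch (comments only).  ofp_header, OFPT_ECHO_REQUEST and
+# OFP_TCP_PORT are assumed to be provided by the generated code that follows
+# this template; the switch address is illustrative only.
+#
+#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+#   sock.connect(("192.168.1.1", OFP_TCP_PORT))
+#   ofsock = ofsocket(sock)
+#   req = ofp_header()
+#   req.type = OFPT_ECHO_REQUEST
+#   ofsock.send(req.pack())            # send() fixes up ofp_header.length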
diff --git a/tools/pylibopenflow/pylib/c2py.py b/tools/pylibopenflow/pylib/c2py.py
new file mode 100644
index 0000000..b699c5e
--- /dev/null
+++ b/tools/pylibopenflow/pylib/c2py.py
@@ -0,0 +1,154 @@
+"""This module converts C types to Python struct pattern string.
+
+Date June 2009
+Created by ykk
+"""
+import cheader
+import struct
+
+class cstruct2py:
+    """Class that converts a C struct to a Python struct pattern string
+
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self):
+ """Initialize
+ """
+ ##Mapping
+ self.structmap = {}
+ self.structmap["char"] = "c"
+ self.structmap["signed char"] = "b"
+ self.structmap["uint8_t"]=\
+ self.structmap["unsigned char"] = "B"
+ self.structmap["short"] = "h"
+ self.structmap["uint16_t"] =\
+ self.structmap["unsigned short"] = "H"
+ self.structmap["int"] = "i"
+ self.structmap["unsigned int"] = "I"
+ self.structmap["long"] = "l"
+ self.structmap["uint32_t"] =\
+ self.structmap["unsigned long"] = "L"
+ self.structmap["long long"] = "q"
+ self.structmap["uint64_t"] =\
+ self.structmap["unsigned long long"] = "Q"
+ self.structmap["float"] = "f"
+ self.structmap["double"] = "d"
+
+ def get_pattern(self,ctype):
+ """Get pattern string for ctype.
+ Return None if ctype is not expanded.
+ """
+ if (ctype.expanded):
+ if (isinstance(ctype, cheader.cprimitive)):
+ return self.structmap[ctype.typename]
+ elif (isinstance(ctype, cheader.cstruct)):
+ string=""
+ for member in ctype.members:
+ string += self.get_pattern(member)
+ return string
+ elif (isinstance(ctype, cheader.carray)):
+ if (ctype.size == 0):
+ return ""
+ else:
+ string = self.get_pattern(ctype.object)
+ return string * ctype.size
+ return None
+
+ def get_size(self, ctype, prefix="!"):
+ """Return size of struct or pattern specified
+ """
+ if (isinstance(ctype, str)):
+ return struct.calcsize(ctype)
+ elif (isinstance(ctype, cheader.ctype)):
+ return struct.calcsize(prefix + self.get_pattern(ctype))
+ else:
+ return 0
+
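+# Note: the cstruct2py mapping above gives, for example, the pattern "BBHL"
+# for openflow.h's ofp_header (uint8_t version, uint8_t type, uint16_t length,
+# uint32_t xid); with the network-order "!" prefix, struct.calcsize("!BBHL")
+# is 8, matching OFP_ASSERT(sizeof(struct ofp_header) == 8).
+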
+class structpacker:
+ """Pack/unpack packets with ctype.
+
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self, prefix=""):
+ """Initialize with prefix to struct
+ """
+ ##Reference to prefix
+ self.prefix = prefix
+
+ def pack(self, ctype, *arg):
+        """Pack packet according to the ctype or pattern provided.
+ Return struct packed.
+ """
+ if (isinstance(ctype, str)):
+ return struct.pack(self.prefix+ctype, *arg)
+ elif (isinstance(ctype, cheader.ctype)):
+            return struct.pack(self.prefix+cstruct2py().get_pattern(ctype),
+                               *arg)
+ else:
+ return None
+
+ def unpack_from_front(self, ctype, binaryString, returnDictionary=True):
+        """Unpack the front of the packet according to the ctype or pattern
+        provided.
+
+ Return (dictionary of values indexed by arg name,
+ remaining binary string) if ctype is cheader.ctype
+ and returnDictionary is True,
+ else return (array of data unpacked, remaining binary string).
+ """
+ pattern = ""
+ if (isinstance(ctype, str)):
+ pattern = ctype
+ elif (isinstance(ctype, cheader.ctype)):
+            pattern = cstruct2py().get_pattern(ctype)
+ else:
+ return None
+ dsize = struct.calcsize(pattern)
+
+ if (dsize > len(binaryString)):
+ return None
+
+        return (self.peek_from_front(ctype, binaryString, returnDictionary),
+                binaryString[dsize:])
+
+ def peek_from_front(self, ctype, binaryString, returnDictionary=True):
+        """Peek at the front of the packet, unpacking it according to the
+        ctype or pattern provided.
+
+ Return dictionary of values indexed by arg name,
+ if ctype is cheader.ctype and returnDictionary is True,
+ else return array of data unpacked.
+ """
+ pattern = self.prefix
+ if (isinstance(ctype, str)):
+ pattern += ctype
+ elif (isinstance(ctype, cheader.ctype)):
+            pattern += cstruct2py().get_pattern(ctype)
+ else:
+ return None
+ dsize = struct.calcsize(pattern)
+ if (dsize > len(binaryString)):
+ return None
+ data = struct.unpack(pattern, binaryString[0:dsize])
+
+ #Return values
+ if (isinstance(ctype, str) or
+ (not returnDictionary)):
+ return data
+ else:
+            return self.data2dic(ctype, data)
+
+ def data2dic(self,ctype,data):
+ """Convert data to dictionary
+ """
+ valDic = {}
+ names = ctype.get_names()
+ for name in names:
+ valDic[name] = []
+ for d in data:
+ name = names.pop(0)
+ valDic[name].append(d)
+ return valDic
+
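+if __name__ == "__main__":
+    # Minimal self-check sketch (illustrative only).  "BBHL" is the pattern
+    # cstruct2py derives for ofp_header; it is hard-coded here so the demo
+    # does not depend on parsing any header file.
+    packer = structpacker("!")
+    wire = packer.pack("BBHL", 1, 0, 8, 0)
+    print len(wire)                                     # 8
+    print packer.peek_from_front("BBHL", wire, False)   # (1, 0, 8, 0)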
diff --git a/tools/pylibopenflow/pylib/cheader.py b/tools/pylibopenflow/pylib/cheader.py
new file mode 100644
index 0000000..a23e1eb
--- /dev/null
+++ b/tools/pylibopenflow/pylib/cheader.py
@@ -0,0 +1,434 @@
+"""This module parse and store a C/C++ header file.
+
+Date June 2009
+Created by ykk
+"""
+import re
+from config import *
+
+class textfile:
+ """Class to handle text file.
+
+ Date June 2009
+ Created by ykk
+ """
+ def __init__(self, filename):
+ """Initialize filename with no content.
+ """
+ ##Filename
+ if (isinstance(filename, str)):
+ self.filename = []
+ self.filename.append(filename)
+ else:
+ self.filename = filename
+ ##Content
+ self.content = []
+
+ def read(self):
+ """Read file
+ """
+ for filename in self.filename:
+ fileRef = open(filename, "r")
+ for line in fileRef:
+ self.content.append(line)
+ fileRef.close()
+
+class ctype:
+ """Class to represent types in C
+ """
+ def __init__(self,typename, name=None, expanded=False):
+ """Initialize
+ """
+ ##Name
+ self.name = name
+ ##Type of primitive
+ self.typename = typename
+ ##Expanded
+ self.expanded = expanded
+
+ def expand(self, cheader):
+ """Expand type if applicable
+ """
+ raise NotImplementedError()
+
+ def get_names(self):
+ """Return name of variables
+ """
+ raise NotImplementedError()
+
+class cprimitive(ctype):
+ """Class to represent C primitive
+
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self,typename, name=None):
+ """Initialize and store primitive
+ """
+ ctype.__init__(self, typename, name, True)
+
+ def __str__(self):
+ """Return string representation
+ """
+ if (self.name == None):
+ return self.typename
+ else:
+ return self.typename+" "+str(self.name)
+
+ def expand(self, cheader):
+ """Expand type if applicable
+ """
+ pass
+
+ def get_names(self):
+ """Return name of variables
+ """
+ namelist = []
+ namelist.append(self.name)
+ return namelist
+
+class cstruct(ctype):
+ """Class to represent C struct
+
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self, typename, name=None):
+ """Initialize struct
+ """
+ ctype.__init__(self, typename, name)
+ ##List of members in struct
+ self.members = []
+
+ def __str__(self):
+ """Return string representation
+ """
+ string = "struct "+self.typename
+ if (self.name != None):
+ string += " "+self.name
+ if (len(self.members) == 0):
+ return string
+ #Add members
+ string +=" {\n"
+ for member in self.members:
+ string += "\t"+str(member)
+ if (not isinstance(member, cstruct)):
+ string += ";"
+ string += "\n"
+ string +="};"
+ return string
+
+ def expand(self, cheader):
+ """Expand struct
+ """
+ self.expanded = True
+ #Expanded each member
+ for member in self.members:
+ if (isinstance(member, cstruct) and
+ (not member.expanded)):
+ try:
+ if (not cheader.structs[member.typename].expanded):
+ cheader.structs[member.typename].expand(cheader)
+ member.members=cheader.structs[member.typename].members[:]
+ member.expanded = True
+ except KeyError:
+ self.expanded=False
+ else:
+ member.expand(cheader)
+
+ def get_names(self):
+ """Return name of variables
+ """
+ namelist = []
+ for member in self.members:
+ if (isinstance(member, cstruct)):
+ tmplist = member.get_names()
+ for item in tmplist:
+ namelist.append(member.name+"."+item)
+ else:
+ namelist.extend(member.get_names())
+ return namelist
+
+
+class carray(ctype):
+ """Class to represent C array
+
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self, typename, name, isPrimitive, size):
+ """Initialize array of object.
+ """
+ ctype.__init__(self, typename, name,
+ (isinstance(size, int) and isPrimitive))
+ ##Object reference
+ if (isPrimitive):
+ self.object = cprimitive(typename, name)
+ else:
+ self.object = cstruct(typename, name)
+ ##Size of array
+ self.size = size
+
+ def __str__(self):
+ """Return string representation
+ """
+ return str(self.object)+"["+str(self.size)+"]"
+
+ def expand(self, cheader):
+ """Expand array
+ """
+ self.expanded = True
+ if (not self.object.expanded):
+ if (isinstance(self.object, cstruct)):
+ cheader.structs[self.object.typename].expand(cheader)
+ self.object.members=cheader.structs[self.object.typename].members[:]
+ else:
+ self.object.expand(cheader)
+
+ if (not isinstance(self.size, int)):
+ val = cheader.get_value(self.size)
+ if (val == None):
+ self.expanded = False
+ else:
+ try:
+ self.size = int(val)
+ except ValueError:
+ self.size = val
+ self.expanded = False
+
+ def get_names(self):
+ """Return name of variables
+ """
+ namelist = []
+ for i in range(0,self.size):
+ namelist.append(self.object.name)
+ return namelist
+
+class ctype_parser:
+ """Class to check c types
+
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self):
+ """Initialize
+ """
+ self.CPrimitives = ["char","signed char","unsigned char",
+ "short","unsigned short",
+ "int","unsigned int",
+ "long","unsigned long",
+ "long long","unsigned long long",
+ "float","double",
+ "uint8_t","uint16_t","uint32_t","uint64_t"]
+
+ def is_primitive(self,type):
+ """Check type given is primitive.
+
+ Return true if valid, and false otherwise
+ """
+ if (type in self.CPrimitives):
+ return True
+ else:
+ return False
+
+ def is_array(self, string):
+ """Check if string declares an array
+ """
+ parts=string.strip().split()
+ if (len(parts) <= 1):
+ return False
+ else:
+ pattern = re.compile("\[.*?\]", re.MULTILINE)
+ values = pattern.findall(string)
+ if (len(values) == 1):
+ return True
+ else:
+ return False
+
+ def parse_array(self, string):
+ """Parse array from string.
+        Return (size, name).
+ """
+ pattern = re.compile("\[.*?\]", re.MULTILINE)
+ namepattern = re.compile(".*?\[", re.MULTILINE)
+ values = pattern.findall(string)
+ if (len(values) != 1):
+ return (1,string)
+ else:
+ val = values[0][1:-1]
+ try:
+ sizeval = int(val)
+ except ValueError:
+ if (val==""):
+ sizeval = 0
+ else:
+ sizeval = val
+ return (sizeval,
+ namepattern.findall(string)[0].strip()[0:-1])
+
+ def parse_type(self, string):
+ """Parse string and return cstruct or cprimitive.
+ Else return None
+ """
+ parts=string.strip().split()
+ if (len(parts) >= 2):
+ if (parts[0].strip() == "struct"):
+ typename = " ".join(parts[1:-1])
+ else:
+ typename = " ".join(parts[:-1])
+ (size, name) = self.parse_array(parts[-1])
+ if IGNORE_ZERO_ARRAYS and size == 0:
+ return None
+ #Create appropriate type
+ if (size != 1):
+ #Array
+ return carray(typename, name,
+ self.is_primitive(typename),size)
+ else:
+ #Not array
+ if IGNORE_OFP_HEADER and typename == "ofp_header":
+ return None
+ if (self.is_primitive(typename)):
+ return cprimitive(typename, name)
+ else:
+ return cstruct(typename, name)
+ else:
+ return None
+
+class cheaderfile(textfile):
+ """Class to handle C header file.
+
+ Date June 2009
+ Created by ykk
+ """
+ def __init__(self, filename):
+ """Initialize filename and read from file
+ """
+ textfile.__init__(self,filename)
+ self.read()
+ self.__remove_comments()
+ ##Dictionary of macros
+ self.macros = {}
+ self.__get_macros()
+ ##Dictionary of enumerations
+ self.enums = {}
+ self.enum_values = {}
+ self.__get_enum()
+ self.__get_enum_values()
+ ##Dictionary of structs
+ self.structs = {}
+ self.__get_struct()
+
+ def get_enum_name(self, enum, value):
+ """Return name of variable in enum
+ """
+ for e in self.enums[enum]:
+ if (self.enum_values[e] == value):
+ return e
+
+ def eval_value(self, value):
+ """Evaluate value string
+ """
+ try:
+ return eval(value, self.enum_values)
+ except:
+ return value.strip()
+
+ def get_value(self, name):
+ """Get value for variable name,
+ searching through enum and macros.
+ Else return None
+ """
+ try:
+ return self.enum_values[name]
+ except KeyError:
+ try:
+ return self.macros[name]
+ except KeyError:
+ return None
+
+ def __remove_comments(self):
+ """Remove all comments
+ """
+ fileStr = "".join(self.content)
+ pattern = re.compile("\\\.*?\n", re.MULTILINE)
+ fileStr = pattern.sub("",fileStr)
+ pattern = re.compile(r"/\*.*?\*/", re.MULTILINE|re.DOTALL)
+ fileStr = pattern.sub("",fileStr)
+ pattern = re.compile("//.*$", re.MULTILINE)
+ fileStr = pattern.sub("",fileStr)
+ self.content = fileStr.split('\n')
+
+ def __get_struct(self):
+ """Get all structs
+ """
+ typeparser = ctype_parser()
+ fileStr = "".join(self.content)
+ #Remove attribute
+ attrpattern = re.compile("} __attribute__ \(\((.+?)\)\);", re.MULTILINE)
+ attrmatches = attrpattern.findall(fileStr)
+ for amatch in attrmatches:
+ fileStr=fileStr.replace(" __attribute__ (("+amatch+"));",";")
+ #Find all structs
+ pattern = re.compile("struct[\w\s]*?{.*?};", re.MULTILINE)
+ matches = pattern.findall(fileStr)
+ #Process each struct
+ namepattern = re.compile("struct(.+?)[ {]", re.MULTILINE)
+ pattern = re.compile("{(.+?)};", re.MULTILINE)
+ for match in matches:
+ structname = namepattern.findall(match)[0].strip()
+ if (len(structname) != 0):
+ values = pattern.findall(match)[0].strip().split(";")
+ cstru = cstruct(structname)
+ for val in values:
+ presult = typeparser.parse_type(val)
+ if (presult != None):
+ cstru.members.append(presult)
+ self.structs[structname] = cstru
+ #Expand all structs
+ for (structname, struct) in self.structs.items():
+ struct.expand(self)
+
+ def __get_enum(self):
+ """Get all enumeration
+ """
+ fileStr = "".join(self.content)
+ #Find all enumerations
+ pattern = re.compile("enum[\w\s]*?{.*?}", re.MULTILINE)
+ matches = pattern.findall(fileStr)
+ #Process each enumeration
+ namepattern = re.compile("enum(.+?){", re.MULTILINE)
+ pattern = re.compile("{(.+?)}", re.MULTILINE)
+ for match in matches:
+ values = pattern.findall(match)[0].strip().split(",")
+ #Process each value in enumeration
+ enumList = []
+ value = 0
+ for val in values:
+ if not (val.strip() == ""):
+ valList=val.strip().split("=")
+ enumList.append(valList[0].strip())
+ if (len(valList) == 1):
+ self.enum_values[valList[0].strip()] = value
+ value += 1
+ else:
+ self.enum_values[valList[0].strip()] = self.eval_value(valList[1].strip())
+ self.enums[namepattern.findall(match)[0].strip()] = enumList
+
+ def __get_enum_values(self):
+ """Patch unresolved enum values
+ """
+ for name,enumval in self.enum_values.items():
+ if isinstance(enumval,str):
+ self.enum_values[name] = self.eval_value(enumval)
+
+ def __get_macros(self):
+ """Extract macros
+ """
+ for line in self.content:
+ if (line[0:8] == "#define "):
+ lineList = line[8:].split()
+ if (len(lineList) >= 2):
+ self.macros[lineList[0]] = self.eval_value("".join(lineList[1:]))
+ else:
+ self.macros[lineList[0]] = ""
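+
+if __name__ == "__main__":
+    # Quick illustration; the header path is an assumption about the working
+    # directory (tools/pylibopenflow/pylib) and may need adjusting.
+    ofheader = cheaderfile("../include/openflow.h")
+    print "structs parsed: " + str(len(ofheader.structs))
+    print ofheader.structs["ofp_header"]
+    print "OFP_VERSION = " + str(ofheader.get_value("OFP_VERSION"))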
diff --git a/tools/pylibopenflow/pylib/config.py b/tools/pylibopenflow/pylib/config.py
new file mode 100644
index 0000000..bbf1528
--- /dev/null
+++ b/tools/pylibopenflow/pylib/config.py
@@ -0,0 +1,24 @@
+
+# of_message specific controls
+
+# Do not include any arrays marked [0]
+IGNORE_ZERO_ARRAYS = 1
+
+# Do not include the ofp_header as a member in any structure
+# This allows messages to be consistently generated as:
+# explicit header declaration
+# core member declaration
+# variable length data
+IGNORE_OFP_HEADER = 1
+
+# Generate object equality functions
+GEN_OBJ_EQUALITY = 1
+
+# Generate object show functions
+GEN_OBJ_SHOW = 1
+
+# Generate lists of enum values
+GEN_ENUM_VALUES_LIST = 0
+
+# Generate dictionary of enum strings to values
+GEN_ENUM_DICTIONARY = 1
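+
+# Illustrative effect of the flags above on openflow.h's ofp_packet_in (a
+# sketch of the intended output, not something generated by this file): with
+# IGNORE_OFP_HEADER and IGNORE_ZERO_ARRAYS set, the generated class keeps only
+#   buffer_id, total_len, in_port, reason, pad
+# i.e. the leading "struct ofp_header header" member and the trailing
+# "uint8_t data[0]" array are omitted.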
diff --git a/tools/pylibopenflow/pylib/cpythonize.py b/tools/pylibopenflow/pylib/cpythonize.py
new file mode 100644
index 0000000..6a66d7c
--- /dev/null
+++ b/tools/pylibopenflow/pylib/cpythonize.py
@@ -0,0 +1,533 @@
+"""This module generate Python code for C structs.
+
+Date January 2010
+Created by ykk
+"""
+import cheader
+import c2py
+import datetime
+import struct
+import re
+from config import *
+
+def _space_to(n, str):
+ """
+ Generate a string of spaces to achieve width n given string str
+ If length of str >= n, return one space
+ """
+ spaces = n - len(str)
+ if spaces > 0:
+ return " " * spaces
+ return " "
+
+class rules:
+    """Class that specifies rules for pythonization
+
+ Date January 2010
+ Created by ykk
+ """
+ def __init__(self):
+ """Initialize rules
+ """
+ ##Default values for members
+ self.default_values = {}
+ #Default values for struct
+ self.struct_default = {}
+ ##What is a tab
+ self.tab = " "
+ ##Macros to exclude
+ self.excluded_macros = []
+ ##Enforce mapping
+ self.enforced_maps = {}
+
+ def get_enforced_map(self, structname):
+ """Get code to enforce mapping
+ """
+ code = []
+ try:
+ mapping = self.enforced_maps[structname]
+ except KeyError:
+ return None
+ for (x,xlist) in mapping:
+ code.append("if (not (self."+x+" in "+xlist+")):")
+ code.append(self.tab+"return (False, \""+x+" must have values from "+xlist+"\")")
+ return code
+
+
+ def get_struct_default(self, structname, fieldname):
+ """Get code to set defaults for member struct
+ """
+ try:
+ return "."+fieldname+self.struct_default[(structname, fieldname)]
+ except KeyError:
+ return None
+
+ def get_default_value(self, structname, fieldname):
+ """Get default value for struct's field
+ """
+ try:
+ return self.default_values[(structname, fieldname)]
+ except KeyError:
+ return 0
+
+ def include_macro(self, name):
+ """Check if macro should be included
+ """
+ return not (name in self.excluded_macros)
+
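+# Rough configuration sketch (comments only; the concrete keys below are
+# illustrative assumptions, not rules shipped with this module):
+#
+#   r = rules()
+#   r.default_values[("ofp_header", "version")] = "OFP_VERSION"
+#   r.enforced_maps["ofp_header"] = [("type", "ofp_type")]
+#   r.excluded_macros.append("OFP_ASSERT")
+#   gen = pythonizer(cheader.cheaderfile("openflow.h"), r)   # class defined below
+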
+class pythonizer:
+    """Class that pythonizes C structures
+
+ Date January 2010
+ Created by ykk
+ """
+ def __init__(self, cheaderfile, pyrules = None, tab=" "):
+ """Initialize
+ """
+ ##Rules
+ if (pyrules == None):
+ self.rules = rules()
+ else:
+ self.rules = pyrules
+ ##What is a tab (same as rules)
+ self.tab = str(tab)
+ self.rules.tab = self.tab
+ ##Reference to C header file
+ self.cheader = cheaderfile
+ ##Reference to cstruct2py
+ self.__c2py = c2py.cstruct2py()
+ ##Code for assertion
+ self.__assertcode = []
+
+ def pycode(self,preamble=None):
+ """Return pythonized code
+ """
+ code = []
+ code.append("import struct")
+ code.append("")
+ if (preamble != None):
+ fileRef = open(preamble,"r")
+ for l in fileRef:
+ code.append(l[:-1])
+ fileRef.close()
+ code.append("# Structure definitions")
+ for name,struct in self.cheader.structs.items():
+ code.extend(self.pycode_struct(struct))
+ code.append("")
+ code.append("# Enumerated type definitions")
+ for name,enum in self.cheader.enums.items():
+ code.extend(self.pycode_enum(name,enum))
+ if GEN_ENUM_DICTIONARY:
+ code.extend(self.pycode_enum_map(name,enum))
+ code.append("")
+ code.append("# Values from macro definitions")
+ for name,macro in self.cheader.macros.items():
+ code.extend(self.pycode_macro(name))
+ code.append("")
+ code.append("# Basic structure size definitions.")
+ if IGNORE_OFP_HEADER:
+ code.append("# Does not include ofp_header members.")
+ if IGNORE_ZERO_ARRAYS:
+ code.append("# Does not include variable length arrays.")
+ struct_keys = self.cheader.structs.keys()
+ struct_keys.sort()
+ for name in struct_keys:
+ struct = self.cheader.structs[name]
+ code.append(self.pycode_struct_size(name, struct))
+
+ return code
+
+ def pycode_enum(self, name, enum):
+ """Return Python array for enum
+ """
+ code=[]
+ code.append(name+" = "+str(enum))
+ ev = []
+ for e in enum:
+ v = self.cheader.get_value(e)
+ ev.append(v)
+ code.append(e+"%s= "%_space_to(36,e)+str(v))
+ if GEN_ENUM_VALUES_LIST:
+ code.append(name+"_values = "+str(ev))
+ return code
+
+ def pycode_enum_map(self, name, enum):
+ """Return Python dictionary for enum
+ """
+ code = []
+ code.append(name+"_map = {")
+ first = 1
+ for e in enum:
+ v = self.cheader.get_value(e)
+ if first:
+ prev_e = e
+ prev_v = v
+ first = 0
+ else:
+ code.append(self.tab + "'%s'%s: %s," %
+ (prev_e, _space_to(30, prev_e), prev_v))
+ prev_e = e
+ prev_v = v
+ code.append(self.tab + "'%s'%s: %s" %
+ (prev_e, _space_to(30, prev_e), prev_v))
+ code.append("}")
+ return code
+
+ def pycode_macro(self,name):
+ """Return Python dict for macro
+ """
+ code = []
+ if (self.rules.include_macro(name)):
+ code.append(name+" = "+str(self.cheader.get_value(name)))
+ return code
+
+ def pycode_struct_size(self, name, struct):
+ """Return one liner giving the structure size in bytes
+ """
+ pattern = '!' + self.__c2py.get_pattern(struct)
+ bytes = self.__c2py.get_size(pattern)
+ code = name.upper() + "_BYTES = " + str(bytes)
+ return code
+
+ def pycode_struct(self, struct_in):
+ """Return Python class code given C struct.
+
+        Returns None if struct_in is not a cheader.cstruct.
+        Otherwise returns a list of strings containing the Python class code.
+ """
+ if (not isinstance(struct_in, cheader.cstruct)):
+ return None
+
+ code=[]
+ self.__assertcode = []
+ code.extend(self.codeheader(struct_in))
+ code.extend(self.codeinit(struct_in))
+ code.append("")
+ code.extend(self.codeassert(struct_in))
+ code.append("")
+ code.extend(self.codepack(struct_in))
+ code.append("")
+ code.extend(self.codeunpack(struct_in))
+ code.append("")
+ code.extend(self.codelen(struct_in))
+ code.append("")
+ if GEN_OBJ_EQUALITY:
+ code.extend(self.codeeq(struct_in))
+ code.append("")
+ if GEN_OBJ_SHOW:
+ code.extend(self.codeshow(struct_in))
+ code.append("")
+ return code
+
+ def codeheader(self, struct_in):
+ """Return Python code for header
+ """
+ code=[]
+ code.append("class "+struct_in.typename+":")
+ code.append(self.tab+"\"\"\"Automatically generated Python class for "+struct_in.typename)
+ code.append("")
+ code.append(self.tab+"Date "+str(datetime.date.today()))
+ code.append(self.tab+"Created by "+self.__module__+"."+self.__class__.__name__)
+ if IGNORE_OFP_HEADER:
+ code.append(self.tab+"Core structure: Messages do not include ofp_header")
+ if IGNORE_ZERO_ARRAYS:
+ code.append(self.tab+"Does not include var-length arrays")
+ code.append(self.tab+"\"\"\"")
+ return code
+
+ def codeinit(self, struct_in):
+ """Return Python code for init function
+ """
+ code = []
+ code.append(self.tab+"def __init__(self):")
+ code.append(self.tab*2+"\"\"\"Initialize")
+ code.append(self.tab*2+"Declare members and default values")
+ code.append(self.tab*2+"\"\"\"")
+ code.extend(self.codemembers(struct_in,self.tab*2+"self"))
+ return code
+
+ def codemembers(self, struct_in, prepend=""):
+ """Return members of class
+ """
+ code = []
+ for member in struct_in.members:
+ if (isinstance(member, cheader.cstruct)):
+ code.append(prepend+"."+member.name+" = "+member.typename+"()")
+ struct_default = self.rules.get_struct_default(struct_in.typename, member.name)
+ if (struct_default != None):
+ code.append(prepend+struct_default)
+ self.__structassert(member, (prepend+"."+member.name).strip())
+ elif (isinstance(member, cheader.carray)):
+ if (member.typename == "char"):
+ initvalue = "\"\""
+ self.__stringassert(member, (prepend+"."+member.name).strip())
+ else:
+ if (isinstance(member.object, cheader.cprimitive)):
+ initvalue="0"
+ else:
+ initvalue="None"
+ initvalue=(initvalue+",")*member.size
+ initvalue="["+initvalue[:-1]+"]"
+ self.__arrayassert(member, (prepend+"."+member.name).strip())
+ code.append(prepend+"."+member.name+"= "+initvalue)
+ else:
+ code.append(prepend+"."+member.name+" = "+
+ str(self.rules.get_default_value(struct_in.typename, member.name)))
+ return code
+
+ def __structassert(self, cstruct, cstructname):
+ """Return code to check for C array
+ """
+ self.__assertcode.append(self.tab*2+"if(not isinstance("+cstructname+", "+cstruct.typename+")):")
+ self.__assertcode.append(self.tab*3+"return (False, \""+cstructname+" is not class "+cstruct.typename+" as expected.\")")
+
+ def __addassert(self, prefix):
+ code = []
+ code.append(prefix+"if(not self.__assert()[0]):")
+ code.append(prefix+self.tab+"return None")
+ return code
+
+ def __stringassert(self, carray, carrayname):
+ """Return code to check for C array
+ """
+ self.__assertcode.append(self.tab*2+"if(not isinstance("+carrayname+", str)):")
+ self.__assertcode.append(self.tab*3+"return (False, \""+carrayname+" is not string as expected.\")")
+ self.__assertcode.append(self.tab*2+"if(len("+carrayname+") > "+str(carray.size)+"):")
+ self.__assertcode.append(self.tab*3+"return (False, \""+carrayname+" is not of size "+str(carray.size)+" as expected.\")")
+
+ def __arrayassert(self, carray, carrayname):
+ """Return code to check for C array
+ """
+ if (carray.size == 0):
+ return
+ self.__assertcode.append(self.tab*2+"if(not isinstance("+carrayname+", list)):")
+ self.__assertcode.append(self.tab*3+"return (False, \""+carrayname+" is not list as expected.\")")
+ self.__assertcode.append(self.tab*2+"if(len("+carrayname+") != "+str(carray.size)+"):")
+ self.__assertcode.append(self.tab*3+"return (False, \""+carrayname+" is not of size "+str(carray.size)+" as expected.\")")
+
+ def codeassert(self, struct_in):
+ """Return code for sanity checking
+ """
+ code = []
+ code.append(self.tab+"def __assert(self):")
+ code.append(self.tab*2+"\"\"\"Sanity check")
+ code.append(self.tab*2+"\"\"\"")
+ enforce = self.rules.get_enforced_map(struct_in.typename)
+ if (enforce != None):
+ for line in enforce:
+ code.append(self.tab*2+line)
+ code.extend(self.__assertcode)
+ code.append(self.tab*2+"return (True, None)")
+ return code
+
+ def codepack(self, struct_in, prefix="!"):
+ """Return code that pack struct
+ """
+ code = []
+ code.append(self.tab+"def pack(self, assertstruct=True):")
+ code.append(self.tab*2+"\"\"\"Pack message")
+ code.append(self.tab*2+"Packs empty array used as placeholder")
+ code.append(self.tab*2+"\"\"\"")
+ code.append(self.tab*2+"if(assertstruct):")
+ code.extend(self.__addassert(self.tab*3))
+ code.append(self.tab*2+"packed = \"\"")
+ primPattern = ""
+ primMemberNames = []
+ for member in struct_in.members:
+ if (isinstance(member, cheader.cprimitive)):
+ #Primitives
+ primPattern += self.__c2py.structmap[member.typename]
+ primMemberNames.append("self."+member.name)
+ else:
+ (primPattern, primMemberNames) = \
+ self.__codepackprimitive(code, primPattern,
+ primMemberNames, prefix)
+ if (isinstance(member, cheader.cstruct)):
+ #Struct
+ code.append(self.tab*2+"packed += self."+member.name+".pack()")
+ elif (isinstance(member, cheader.carray) and member.typename == "char"):
+ #String
+ code.append(self.tab*2+"packed += self."+member.name+".ljust("+\
+ str(member.size)+",'\\0')")
+ elif (isinstance(member, cheader.carray) and \
+ isinstance(member.object, cheader.cprimitive)):
+ #Array of Primitives
+ expandedarr = ""
+ if (member.size != 0):
+ for x in range(0, member.size):
+ expandedarr += ", self."+member.name+"["+\
+ str(x).strip()+"]"
+ code.append(self.tab*2+"packed += struct.pack(\""+prefix+\
+ self.__c2py.structmap[member.object.typename]*member.size+\
+ "\""+expandedarr+")")
+ else:
+ code.append(self.tab*2+"for i in self."+member.name+":")
+ code.append(self.tab*3+"packed += struct.pack(\""+\
+ prefix+self.__c2py.get_pattern(member.object)+\
+ "\",i)")
+ elif (isinstance(member, cheader.carray) and \
+ isinstance(member.object, cheader.cstruct)):
+ #Array of struct
+ if (member.size != 0):
+ for x in range(0, member.size):
+ code.append(self.tab*2+"packed += self."+member.name+"["+\
+ str(x).strip()+"].pack()")
+ else:
+ code.append(self.tab*2+"for i in self."+member.name+":")
+ code.append(self.tab*3+"packed += i.pack(assertstruct)")
+ #Clear remaining fields
+ (primPattern, primMemberNames) = \
+ self.__codepackprimitive(code, primPattern,
+ primMemberNames, prefix)
+ code.append(self.tab*2+"return packed")
+ return code
+
+ def __codepackprimitive(self, code, primPattern, primMemberNames, prefix):
+ """Return code for packing primitives
+ """
+ if (primPattern != ""):
+ #Clear prior primitives
+ code.append(self.tab*2+"packed += struct.pack(\""+\
+ prefix+primPattern+"\", "+\
+ str(primMemberNames).replace("'","")[1:-1]+")")
+ return ("",[])
+
+ def codelen(self, struct_in):
+ """Return code to return length
+ """
+ pattern = "!" + self.__c2py.get_pattern(struct_in)
+ code = []
+ code.append(self.tab+"def __len__(self):")
+ code.append(self.tab*2+"\"\"\"Return length of message")
+ code.append(self.tab*2+"\"\"\"")
+ code.append(self.tab*2+"l = "+str(self.__c2py.get_size(pattern)))
+ for member in struct_in.members:
+ if (isinstance(member, cheader.carray) and member.size == 0):
+ if (isinstance(member.object, cheader.cstruct)):
+ code.append(self.tab*2+"for i in self."+member.name+":")
+ code.append(self.tab*3+"l += i.length()")
+ else:
+ pattern="!"+self.__c2py.get_pattern(member.object)
+ size=self.__c2py.get_size(pattern)
+ code.append(self.tab*2+"l += len(self."+member.name+")*"+str(size))
+ code.append(self.tab*2+"return l")
+ return code
+
+ def codeeq(self, struct_in):
+ """Return code to return equality comparisons
+ """
+ code = []
+ code.append(self.tab+"def __eq__(self, other):")
+ code.append(self.tab*2+"\"\"\"Return True if self and other have same values")
+ code.append(self.tab*2+"\"\"\"")
+ code.append(self.tab*2+"if type(self) != type(other): return False")
+ for member in struct_in.members:
+ code.append(self.tab*2 + "if self." + member.name + " != other." +
+ member.name + ": return False")
+ code.append(self.tab*2+"return True")
+ code.append("")
+ code.append(self.tab+"def __ne__(self, other): return not self.__eq__(other)")
+ return code
+
+ def codeshow(self, struct_in):
+ """Return code to print basic members of structure
+ """
+ code = []
+ code.append(self.tab+"def show(self, prefix=''):")
+ code.append(self.tab*2+"\"\"\"" + "Print basic members of structure")
+ code.append(self.tab*2+"\"\"\"")
+ for member in struct_in.members:
+ if re.search('pad', member.name):
+ continue
+ elif (isinstance(member, cheader.cstruct)):
+ code.append(self.tab*2 + "print prefix + '" +
+ member.name + ": ' ")
+ code.append(self.tab*2 + "self." + member.name +
+ ".show(prefix + ' ')")
+ elif (isinstance(member, cheader.carray) and
+ not isinstance(member.object, cheader.cprimitive)):
+ code.append(self.tab*2 + "print prefix + '" + member.name +
+ ": ' ")
+ code.append(self.tab*2 + "for obj in self." + member.name + ":")
+ code.append(self.tab*3 + "obj.show(prefix + ' ')")
+ else:
+ code.append(self.tab*2 + "print prefix + '" + member.name +
+ ": ' + str(self." + member.name + ")")
+ return code
+
+ def codeunpack(self, struct_in, prefix="!"):
+ """Return code that unpack struct
+ """
+ pattern = self.__c2py.get_pattern(struct_in)
+ structlen = self.__c2py.get_size(prefix + pattern)
+ code = []
+ code.append(self.tab+"def unpack(self, binaryString):")
+ code.append(self.tab*2+"\"\"\"Unpack message")
+ code.append(self.tab*2+"Do not unpack empty array used as placeholder")
+ code.append(self.tab*2+"since they can contain heterogeneous type")
+ code.append(self.tab*2+"\"\"\"")
+ code.append(self.tab*2+"if (len(binaryString) < "+str(structlen)+"):")
+ code.append(self.tab*3+"return binaryString")
+ offset = 0
+ primPattern = ""
+ primMemberNames = []
+ for member in struct_in.members:
+ if (isinstance(member, cheader.cprimitive)):
+ #Primitives
+ primPattern += self.__c2py.structmap[member.typename]
+ primMemberNames.append("self."+member.name)
+ else:
+ (primPattern, primMemberNames, offset) = \
+ self.__codeunpackprimitive(code, offset, primPattern,
+ primMemberNames, prefix)
+ if (isinstance(member, cheader.cstruct)):
+ #Struct
+ code.append(self.tab*2+"self."+member.name+\
+ ".unpack(binaryString["+str(offset)+":])")
+ pattern = self.__c2py.get_pattern(member)
+ offset += self.__c2py.get_size(prefix+pattern)
+ elif (isinstance(member, cheader.carray) and member.typename == "char"):
+ #String
+ code.append(self.tab*2+"self."+member.name+\
+ " = binaryString["+str(offset)+":"+\
+ str(offset+member.size)+"].replace(\"\\0\",\"\")")
+ offset += member.size
+ elif (isinstance(member, cheader.carray) and \
+ isinstance(member.object, cheader.cprimitive)):
+ #Array of Primitives
+ expandedarr = ""
+ if (member.size != 0):
+ arrpattern = self.__c2py.structmap[member.object.typename]*member.size
+ for x in range(0, member.size):
+ expandedarr += "self."+member.name+"["+\
+ str(x).strip()+"], "
+ code.append(self.tab*2+"("+expandedarr[:-2]+") = struct.unpack_from(\""+\
+ prefix+arrpattern+\
+ "\", binaryString, "+str(offset)+")")
+ offset += struct.calcsize(prefix + arrpattern)
+ elif (isinstance(member, cheader.carray) and \
+ isinstance(member.object, cheader.cstruct)):
+ #Array of struct
+ astructlen = self.__c2py.get_size("!"+self.__c2py.get_pattern(member.object))
+ for x in range(0, member.size):
+ code.append(self.tab*2+"self."+member.name+"["+str(x)+"]"+\
+ ".unpack(binaryString["+str(offset)+":])")
+ offset += astructlen
+ #Clear remaining fields
+ (primPattern, primMemberNames, offset) = \
+ self.__codeunpackprimitive(code, offset, primPattern,
+ primMemberNames, prefix)
+ code.append(self.tab*2+"return binaryString["+str(structlen)+":]");
+ return code
+
+ def __codeunpackprimitive(self, code, offset, primPattern,
+ primMemberNames, prefix):
+ """Return code for unpacking primitives
+ """
+ if (primPattern != ""):
+ #Clear prior primitives
+ code.append(self.tab*2+"("+str(primMemberNames).replace("'","")[1:-1]+\
+ ") = struct.unpack_from(\""+\
+ prefix+primPattern+"\", binaryString, "+str(offset)+")")
+ return ("",[], offset+struct.calcsize(prefix+primPattern))
+
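For reference, a minimal usage sketch of the generator above (the header path and driver are hypothetical; it assumes cheader.cheaderfile parses the C header, as it is used elsewhere in this tree):

    # Hypothetical driver: parse a C header and emit the generated classes
    import cheader
    import cpythonize

    hdr = cheader.cheaderfile("include/openflow.h")   # hypothetical path
    gen = cpythonize.pythonizer(hdr)
    for line in gen.pycode():
        print line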
diff --git a/tools/pylibopenflow/pylib/lavi/__init__.py b/tools/pylibopenflow/pylib/lavi/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/pylibopenflow/pylib/lavi/__init__.py
diff --git a/tools/pylibopenflow/pylib/lavi/pythonize.py b/tools/pylibopenflow/pylib/lavi/pythonize.py
new file mode 100644
index 0000000..3c150aa
--- /dev/null
+++ b/tools/pylibopenflow/pylib/lavi/pythonize.py
@@ -0,0 +1,74 @@
+"""This module generate Python code for LAVI and messenger
+
+(C) Copyright Stanford University
+Date January 2010
+Created by ykk
+"""
+import cpythonize
+
+class msgrules(cpythonize.rules):
+ """Class that specify rules for pythonization of messenger
+
+ (C) Copyright Stanford University
+ Date January 2010
+ Created by ykk
+ """
+ def __init__(self):
+ """Initialize rules
+ """
+ cpythonize.rules.__init__(self)
+ ##Default values for members
+ #Default values for struct
+ ##Macros to exclude
+ self.excluded_macros = ['MESSAGE_HH__']
+ ##Enforce mapping
+ self.enforced_maps['messenger_msg'] = [ ('type','msg_type') ]
+
+class lavirules(msgrules):
+ """Class that specify rules for pythonization of LAVI messages
+
+ (C) Copyright Stanford University
+ Date January 2010
+ Created by ykk
+ """
+ def __init__(self, laviheader):
+ """Initialize rules
+ """
+ msgrules.__init__(self)
+ ##Default values for members
+
+ #Default values for struct
+ self.struct_default[('lavi_poll_message',
+ 'header')] = ".type = "+str(laviheader.get_value('LAVIT_POLL'))
+ self.struct_default[('lavi_poll_stop_message',
+ 'header')] = ".type = "+str(laviheader.get_value('LAVIT_POLL_STOP'))
+ ##Macros to exclude
+ self.excluded_macros = ['LAVI_MSG_HH']
+ ##Enforce mapping
+ self.enforced_maps['lavi_header'] = [ ('type','lavi_type') ]
+
+class msgpythonizer(cpythonize.pythonizer):
+ """Class that pythonize C messenger messages
+
+ (C) Copyright Stanford University
+ Date January 2010
+ Created by ykk
+ """
+ def __init__(self, msgheader):
+ """Initialize
+ """
+ rules = msgrules()
+ cpythonize.pythonizer.__init__(self, msgheader, rules)
+
+class lavipythonizer(cpythonize.pythonizer):
+ """Class that pythonize C messenger messages
+
+ (C) Copyright Stanford University
+ Date December 2009
+ Created by ykk
+ """
+ def __init__(self, msgheader):
+ """Initialize
+ """
+ rules = lavirules(msgheader)
+ cpythonize.pythonizer.__init__(self, msgheader, rules)
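A possible way to drive these rules, assuming a messenger header parsed with cheader.cheaderfile (the header file name here is hypothetical):

    # Hypothetical driver for the messenger generator
    import cheader
    from lavi.pythonize import msgpythonizer

    msghdr = cheader.cheaderfile("message.hh")        # hypothetical header path
    gen = msgpythonizer(msghdr)
    print "\n".join(gen.pycode())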
diff --git a/tools/pylibopenflow/pylib/of/__init__.py b/tools/pylibopenflow/pylib/of/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tools/pylibopenflow/pylib/of/__init__.py
diff --git a/tools/pylibopenflow/pylib/of/msg.py b/tools/pylibopenflow/pylib/of/msg.py
new file mode 100644
index 0000000..8617f56
--- /dev/null
+++ b/tools/pylibopenflow/pylib/of/msg.py
@@ -0,0 +1,117 @@
+"""This module parses OpenFlow packets.
+
+Unfortunately, this has to be updated manually for each OpenFlow version
+and packet type. Ugly.
+
+(C) Copyright Stanford University
+Date October 2009
+Created by ykk
+"""
+class parser:
+ """Parser for OpenFlow packets
+
+ (C) Copyright Stanford University
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self, messages):
+ """Initialize
+ """
+ ##Internal reference to OpenFlow messages
+ self.__messages = messages
+
+ def describe(self, packet):
+ """Parse OpenFlow packet and return string description
+ """
+ dic = self.__messages.peek_from_front("ofp_header", packet)
+ desc = self.header_describe(dic)
+ if (dic["type"][0] == self.__messages.get_value("OFPT_HELLO")):
+ pass
+ elif (dic["type"][0] == self.__messages.get_value("OFPT_SET_CONFIG")):
+ desc += "\n\t"+self.switch_config_describe(packet)
+ elif (dic["type"][0] == self.__messages.get_value("OFPT_FLOW_MOD")):
+ (fmdic, remaining) = self.__messages.unpack_from_front("ofp_flow_mod", packet)
+ desc += self.flow_mod_describe(fmdic, "\n\t")
+ desc += "\n\twith remaining "+str(len(remaining))+" bytes"
+ else:
+ desc += "\n\tUnparsed..."
+ return desc
+
+ def flow_mod_describe(self, packet, prefix=""):
+ """Parse flow mod and return description
+ """
+ dic = self.__assert_dic(packet, "ofp_flow_mod")
+ if (dic == None):
+ return ""
+ return prefix+\
+ "Flow_mod of command "+self.__messages.get_enum_name("ofp_flow_mod_command", dic["command"][0])+\
+ " idle/hard timeout:"+str(dic["idle_timeout"][0])+"/"+str(dic["hard_timeout"][0])+\
+ self.match_describe(dic, "match.", "\n\t")+\
+ prefix+\
+ "(priority:"+str(dic["priority"][0])+\
+ "/buffer id:"+str(dic["buffer_id"][0])+\
+ "/out port:"+str(dic["out_port"][0])+")"
+
+ def match_describe(self, dic, nameprefix="", prefix=""):
+ """Return description for ofp match
+ """
+ return prefix+"match wildcards:%x" % dic[nameprefix+"wildcards"][0]+\
+ " inport="+str(dic[nameprefix+"in_port"][0])+\
+ prefix+" "+\
+ " ethertype="+str(dic[nameprefix+"dl_type"][0])+\
+ " vlan="+str(dic[nameprefix+"dl_vlan"][0])+\
+ " "+self.eth_describe(dic[nameprefix+"dl_src"])+"->"+\
+ self.eth_describe(dic[nameprefix+"dl_dst"])+\
+ prefix+" "+\
+ " ipproto="+str(dic[nameprefix+"nw_proto"][0])+\
+ " "+self.ip_describe(dic[nameprefix+"nw_src"][0])+\
+ "->"+self.ip_describe(dic[nameprefix+"nw_src"][0])+\
+ prefix+" "+\
+ " transport "+str(dic[nameprefix+"tp_src"][0])+\
+ "->"+str(dic[nameprefix+"tp_dst"][0])
+
+ def switch_config_describe(self, packet):
+ """Parse OpenFlow switch config and return description
+ """
+ dic = self.__assert_dic(packet, "ofp_switch_config")
+ if (dic == None):
+ return ""
+ return "with flag "+str(self.__messages.get_enum_name("ofp_config_flags", dic["flags"][0]))+\
+ " and miss send length "+str(dic["miss_send_len"][0])
+
+ def header_describe(self, packet):
+ """Parse OpenFlow header and return string description
+ """
+ dic = self.__assert_dic(packet, "ofp_header")
+ if (dic == None):
+ return ""
+ return self.__messages.get_enum_name("ofp_type", dic["type"][0])+" packet "+\
+ "(length:"+str(dic["length"][0])+\
+ "/xid:"+str(dic["xid"][0])+")"
+
+ def ip_describe(self, value):
+ """Return string for ip address
+ """
+ desc = ""
+ for i in range(0,4):
+ (value, cv) = divmod(value, 256)
+ desc = str(cv).strip() +"." + desc
+ return desc
+
+ def eth_describe(self, etheraddr):
+ """Return string for ethernet address
+ """
+ desc = ""
+ for value in etheraddr:
+ desc += ":"+("%x" % value).zfill(2)
+ return desc[1:]
+
+ def __assert_dic(self, packet, typename):
+ """Assert and ensure dictionary is given
+ """
+ dic = None
+ if (isinstance(packet, str)):
+ dic = self.__messages.peek_from_front(typename, packet)
+ elif (isinstance(packet, dict)):
+ dic = packet
+ return dic
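A small sketch of how this parser might be used together with openflow.messages (defined later in this commit); it builds a hello message by hand and asks the parser to describe it. PYLIB_OPENFLOW_HEADER (or an explicit path) must point at openflow.h:

    import openflow
    import of.msg

    ofmsg = openflow.messages()                 # parses openflow.h
    hello = ofmsg.pack("ofp_header",
                       ofmsg.get_value("OFP_VERSION"),
                       ofmsg.get_value("OFPT_HELLO"),
                       8, 0)                    # length 8, xid 0
    p = of.msg.parser(ofmsg)
    print p.describe(hello)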
diff --git a/tools/pylibopenflow/pylib/of/network.py b/tools/pylibopenflow/pylib/of/network.py
new file mode 100644
index 0000000..6765a12
--- /dev/null
+++ b/tools/pylibopenflow/pylib/of/network.py
@@ -0,0 +1,191 @@
+"""This module holds the network.
+
+Copyright(C) 2009, Stanford University
+Date October 2009
+Created by ykk
+"""
+import random
+import openflow
+
+class network:
+ """Class holding information about OpenFlow network
+ """
+ def __init__(self):
+ """Initialize
+ """
+ ##List of switches
+ self.switches = []
+ ##Dictionary of links
+ self.links = {}
+ ##Reference to connections
+ self.connections = openflow.connections()
+
+ def add_switch(self, sw):
+ """Add switch to network
+ """
+ self.switches.append(sw)
+ self.connections.add_connection(sw, sw.connection)
+
+ def add_link(self, link):
+ """Add link to network
+ """
+ try:
+ self.links[link.switch1,link.switch2].append(link)
+ except KeyError:
+ self.links[link.switch1,link.switch2] = []
+ self.links[link.switch1,link.switch2].append(link)
+
+class link:
+ """Class to hold information about link
+
+ Copyright(C) 2009, Stanford University
+ Date November 2009
+ Created by ykk
+ """
+ def __init__(self, switch1, switch2):
+ """Initialize link between specified switches
+ """
+ ##Reference to first switch
+ self.switch1 = switch1
+ ##Reference to second switch
+ self.switch2 = switch2
+
+class switch:
+ """Class holding information about OpenFlow switch
+
+ Copyright(C) 2009, Stanford University
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self, miss_send_len=128,
+ sock=None, dpid=None, n_buffers=100, n_tables=1,
+ capability=None):
+ """Initialize switch
+ """
+ ##Socket to controller
+ self.sock = sock
+ ##Datapath id of switch
+ if (dpid != None):
+ self.datapath_id = dpid
+ else:
+ self.datapath_id = random.randrange(1, pow(2,64))
+ ##Number of buffers
+ self.n_buffers = n_buffers
+ ##Number of tables
+ self.n_tables= n_tables
+ ##Capabilities
+ if (isinstance(capability, switch_capabilities)):
+ self.capability = capability
+ else:
+ self.capability = switch_capabilities(miss_send_len)
+ ##Valid Actions
+ self.valid_actions = 0
+ ##List of port
+ self.port = []
+
+class switch_capabilities:
+ """Class to hold switch capabilities
+ """
+ def __init__(self, miss_send_len=128):
+ """Initialize
+
+ Copyright(C) 2009, Stanford University
+ Date October 2009
+ Created by ykk
+ """
+ ##Capabilities support by datapath
+ self.flow_stats = True
+ self.table_stats = True
+ self.port_stats = True
+ self.stp = True
+ self.multi_phy_tx = True
+ self.ip_resam = False
+ ##Switch config
+ self.send_exp = None
+ self.ip_frag = 0
+ self.miss_send_len = miss_send_len
+ ##Valid actions
+ self.act_output = True
+ self.act_set_vlan_vid = True
+ self.act_set_vlan_pcp = True
+ self.act_strip_vlan = True
+ self.act_set_dl_src = True
+ self.act_set_dl_dst = True
+ self.act_set_nw_src = True
+ self.act_set_nw_dst = True
+ self.act_set_tp_src = True
+ self.act_set_tp_dst = True
+ self.act_vendor = False
+
+ def get_capability(self, ofmsg):
+ """Return value for uint32_t capability field
+ """
+ value = 0
+ if (self.flow_stats):
+ value += ofmsg.get_value("OFPC_FLOW_STATS")
+ if (self.table_stats):
+ value += ofmsg.get_value("OFPC_TABLE_STATS")
+ if (self.port_stats):
+ value += ofmsg.get_value("OFPC_PORT_STATS")
+ if (self.stp):
+ value += ofmsg.get_value("OFPC_STP")
+ if (self.multi_phy_tx):
+ value += ofmsg.get_value("OFPC_MULTI_PHY_TX")
+ if (self.ip_resam):
+ value += ofmsg.get_value("OFPC_IP_REASM")
+ return value
+
+ def get_actions(self, ofmsg):
+ """Return value for uint32_t action field
+ """
+ value = 0
+ if (self.act_output):
+ value += (1 << (ofmsg.get_value("OFPAT_OUTPUT")+1))
+ if (self.act_set_vlan_vid):
+ value += (1 << (ofmsg.get_value("OFPAT_SET_VLAN_VID")+1))
+ if (self.act_set_vlan_pcp):
+ value += (1 << (ofmsg.get_value("OFPAT_SET_VLAN_PCP")+1))
+ if (self.act_strip_vlan):
+ value += (1 << (ofmsg.get_value("OFPAT_STRIP_VLAN")+1))
+ if (self.act_set_dl_src):
+ value += (1 << (ofmsg.get_value("OFPAT_SET_DL_SRC")+1))
+ if (self.act_set_dl_dst):
+ value += (1 << (ofmsg.get_value("OFPAT_SET_DL_DST")+1))
+ if (self.act_set_nw_src):
+ value += (1 << (ofmsg.get_value("OFPAT_SET_NW_SRC")+1))
+ if (self.act_set_nw_dst):
+ value += (1 << (ofmsg.get_value("OFPAT_SET_NW_DST")+1))
+ if (self.act_set_tp_src):
+ value += (1 << (ofmsg.get_value("OFPAT_SET_TP_SRC")+1))
+ if (self.act_set_tp_dst):
+ value += (1 << (ofmsg.get_value("OFPAT_SET_TP_DST")+1))
+ return value
+
+class port:
+ """Class to hold information about port
+
+ Copyright(C) 2009, Stanford University
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self, port_no, stp=(2 << 8), hw_addr=None, name=""):
+ """Initialize
+ """
+ ##Port properties
+ self.port_no = port_no
+ if (hw_addr != None):
+ self.hw_addr = hw_addr
+ else:
+ self.hw_addr = random.randrange(1, pow(2,48))
+ self.name = name
+ ##Port config
+ self.port_down = False
+ self.no_stp = False
+ self.no_recv = False
+ self.no_recv_stp = False
+ self.no_flood = False
+ self.no_fwd = False
+ self.no_packet_in = False
+ #Port state
+ self.link_down = False
+ self.stp = stp
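As a sanity check, the capability and action bitmaps can be computed against the parsed OpenFlow header; a sketch, assuming openflow.messages can locate openflow.h:

    import openflow
    import of.network

    ofmsg = openflow.messages()
    caps = of.network.switch_capabilities(miss_send_len=128)
    print "capabilities: 0x%x" % caps.get_capability(ofmsg)
    print "actions:      0x%x" % caps.get_actions(ofmsg)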
diff --git a/tools/pylibopenflow/pylib/of/pythonize.py b/tools/pylibopenflow/pylib/of/pythonize.py
new file mode 100644
index 0000000..687512b
--- /dev/null
+++ b/tools/pylibopenflow/pylib/of/pythonize.py
@@ -0,0 +1,57 @@
+"""This module generate Python code for OpenFlow structs.
+
+(C) Copyright Stanford University
+Date December 2009
+Created by ykk
+"""
+import cpythonize
+from config import *
+
+class rules(cpythonize.rules):
+ """Class that specify rules for pythonization of OpenFlow messages
+
+ (C) Copyright Stanford University
+ Date December 2009
+ Created by ykk
+ """
+ def __init__(self, ofmsg):
+ """Initialize rules
+ """
+ cpythonize.rules.__init__(self)
+ ##Reference to ofmsg
+ self.__ofmsg = ofmsg
+ ##Default values for members
+ self.default_values[('ofp_header','version')] = self.__ofmsg.get_value('OFP_VERSION')
+ self.default_values[('ofp_switch_config',\
+ 'miss_send_len')] = self.__ofmsg.get_value('OFP_DEFAULT_MISS_SEND_LEN')
+ for x in ['ofp_flow_mod','ofp_flow_expired','ofp_flow_stats']:
+ self.default_values[(x,'priority')] = self.__ofmsg.get_value('OFP_DEFAULT_PRIORITY')
+ #Default values for struct
+ self.struct_default[('ofp_flow_mod',
+ 'header')] = ".type = OFPT_FLOW_MOD"
+# 'header')] = ".type = "+str(self.__ofmsg.get_value('OFPT_FLOW_MOD'))
+ ##Macros to exclude
+ self.excluded_macros = ['OFP_ASSERT(EXPR)','OFP_ASSERT(_EXPR)','OFP_ASSERT',
+ 'icmp_type','icmp_code','OFP_PACKED',
+ 'OPENFLOW_OPENFLOW_H']
+ ##Enforce mapping
+ if GEN_ENUM_VALUES_LIST:
+ self.enforced_maps['ofp_header'] = [ ('type','ofp_type_values') ]
+ elif GEN_ENUM_DICTIONARY:
+ self.enforced_maps['ofp_header'] = \
+ [ ('type','ofp_type_map.values()') ]
+
+class pythonizer(cpythonize.pythonizer):
+ """Class that pythonize C structures of OpenFlow messages
+
+ (C) Copyright Stanford University
+ Date December 2009
+ Created by ykk
+ """
+ def __init__(self, ofmsg):
+ """Initialize
+ """
+ ofrules = rules(ofmsg)
+ cpythonize.pythonizer.__init__(self, ofmsg, ofrules)
+ ##Reference to OpenFlow message class
+ self.__ofmsg = ofmsg
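The OpenFlow-specific generator is typically driven from openflow.messages, which acts both as the header parser and as the value source for the rules above; a sketch (the output file name is hypothetical):

    import openflow
    import of.pythonize

    ofmsg = openflow.messages()                 # needs openflow.h
    gen = of.pythonize.pythonizer(ofmsg)
    out = open("ofstruct.py", "w")              # hypothetical output module
    out.write("\n".join(gen.pycode()) + "\n")
    out.close()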
diff --git a/tools/pylibopenflow/pylib/of/simu.py b/tools/pylibopenflow/pylib/of/simu.py
new file mode 100644
index 0000000..508b076
--- /dev/null
+++ b/tools/pylibopenflow/pylib/of/simu.py
@@ -0,0 +1,144 @@
+"""This module simulates the network.
+
+Copyright(C) 2009, Stanford University
+Date November 2009
+Created by ykk
+"""
+import openflow
+import output
+import of.msg
+import of.network
+
+class network(of.network.network):
+ """Class to simulate OpenFlow network
+
+ Copyright(C) 2009, Stanford University
+ Date November 2009
+ Created by ykk
+ """
+ def __init__(self):
+ """Initialize network
+ """
+ of.network.network.__init__(self)
+        ##Name to use for output
+ self.name = self.__class__.__name__+str(id(self))
+
+class link(of.network.link):
+ """Class to simulate link
+
+ Copyright(C) 2009, Stanford University
+ Date November 2009
+ Created by ykk
+ """
+ def __init__(self, switch1, switch2, isUp=True):
+ """Initialize link
+ """
+ of.network.link.__init__(self, switch1, switch2)
+        ##Name to use for output
+ self.name = self.__class__.__name__+str(id(self))
+ ##Indicate if link is up
+ self.isUp = isUp
+
+class switch(of.network.switch):
+ """Class to simulate OpenFlow switch
+
+ Copyright(C) 2009, Stanford University
+ Date November 2009
+ Created by ykk
+ """
+ def __init__(self, messages, controller, port, miss_send_len=128,
+ dpid=None, n_buffers=100, n_tables=1,
+ capability=None, parser=None, connection=None):
+ """Initialize switch
+ """
+ of.network.switch.__init__(self, miss_send_len,
+ None, dpid, n_buffers, n_tables,
+ capability)
+        ##Name to use for output
+ self.name = self.__class__.__name__+str(id(self))
+ ##Reference to OpenFlow messages
+ self.__messages = messages
+ ##Reference to connection
+ self.connection = openflow.tcpsocket(messages, controller, port)
+ self.sock = self.connection.sock
+ ##Reference to Parser
+ self.parser = None
+ if (parser == None):
+ self.parser = of.msg.parser(messages)
+ else:
+ self.parser = parser
+
+ def receive_openflow(self, packet):
+ """Switch receive OpenFlow packet, and respond accordingly
+ """
+ dic = self.__messages.peek_from_front("ofp_header", packet)
+ if (dic["type"][0] == self.__messages.get_value("OFPT_HELLO")):
+ output.dbg("Receive hello", self.name)
+ elif (dic["type"][0] == self.__messages.get_value("OFPT_ECHO_REQUEST")):
+ self.reply_echo(dic["xid"][0])
+ elif (dic["type"][0] == self.__messages.get_value("OFPT_FEATURES_REQUEST")):
+ self.reply_features(dic["xid"][0])
+ elif (dic["type"][0] == self.__messages.get_value("OFPT_FLOW_MOD")):
+ self.handle_flow_mod(packet)
+ else:
+ output.dbg("Unprocessed message "+self.parser.header_describe(dic),
+ self.name)
+
+ def send_hello(self):
+ """Send hello
+ """
+ self.connection.structsend("ofp_hello",
+ 0, self.__messages.get_value("OFPT_HELLO"),
+ 0, 0)
+ output.dbg("Send hello",self.name)
+
+ def send_packet(self, inport, bufferid=None, packet="", xid=0, reason=None):
+ """Send packet in
+ Assume no match as reason, bufferid = 0xFFFFFFFF,
+ and empty packet by default
+ """
+ if (reason == None):
+ reason = self.__messages.get_value("OFPR_NO_MATCH")
+ if (bufferid == None):
+ bufferid = int("0xFFFFFFFF",16)
+ pktin = self.__messages.pack("ofp_packet_in",
+ 0, self.__messages.get_value("OFPT_PACKET_IN"),
+ 0, xid, #header
+ bufferid, len(packet),
+ inport, reason, 0)
+ self.connection.structsend_raw(pktin+packet)
+ output.dbg("Send packet ",self.name)
+
+ def send_echo(self, xid=0):
+ """Send echo
+ """
+ self.connection.structsend_xid("ofp_header",
+ 0, self.__messages.get_value("OFPT_ECHO_REQUEST"),
+ 0, xid)
+ output.dbg("Send echo", self.name)
+
+ def reply_echo(self, xid):
+ """Reply to echo request
+ """
+ self.connection.structsend_xid("ofp_header",
+ 0, self.__messages.get_value("OFPT_ECHO_REPLY"),
+ 0, xid)
+ output.dbg("Reply echo of xid:"+str(xid),self.name)
+
+ def reply_features(self, xid):
+ """Reply to feature request
+ """
+ self.connection.structsend_xid("ofp_switch_features",
+ 0, self.__messages.get_value("OFPT_FEATURES_REPLY"),
+ 0, xid,
+ self.datapath_id, self.n_buffers,
+ self.n_tables,0,0,0,
+ self.capability.get_capability(self.__messages),
+ self.capability.get_actions(self.__messages))
+ output.dbg("Replied features request of xid "+str(xid), self.name)
+
+ def handle_flow_mod(self, packet):
+ """Handle flow mod: just print it here
+ """
+ output.dbg(self.parser.flow_mod_describe(packet), self.name)
+
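A sketch of driving the simulated switch against a controller (the host and port are hypothetical; output.set_mode must be called before any debug output is printed):

    import openflow
    import of.simu
    import output

    output.set_mode("DBG")
    ofmsg = openflow.messages()
    sw = of.simu.switch(ofmsg, "127.0.0.1", 6633)   # hypothetical controller address
    sw.send_hello()
    pkt = sw.connection.msgreceive(blocking=True)
    sw.receive_openflow(pkt)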
diff --git a/tools/pylibopenflow/pylib/openflow.py b/tools/pylibopenflow/pylib/openflow.py
new file mode 100644
index 0000000..25945b9
--- /dev/null
+++ b/tools/pylibopenflow/pylib/openflow.py
@@ -0,0 +1,336 @@
+"""This module exports OpenFlow protocol to Python.
+
+(C) Copyright Stanford University
+Date October 2009
+Created by ykk
+"""
+import c2py
+import cheader
+import os
+import sys
+import socket
+import select
+import struct
+import time
+
+class messages(cheader.cheaderfile,c2py.cstruct2py,c2py.structpacker):
+ """Class to handle OpenFlow messages
+
+ (C) Copyright Stanford University
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self, openflow_headerfile=None):
+ """Initialize with OpenFlow header file
+
+ If filename is not provided, check the environment
+ variable PYLIB_OPENFLOW_HEADER and search for openflow.h
+ """
+ if (openflow_headerfile != None):
+ cheader.cheaderfile.__init__(self, openflow_headerfile)
+ else:
+ #Check environment variable
+ path = os.getenv("PYLIB_OPENFLOW_HEADER")
+ if not path:
+ print "PYLIB_OPENFLOW_HEADER is not set in environment"
+ sys.exit(2)
+ cheader.cheaderfile.__init__(self, path+"/openflow.h")
+ #Initialize cstruct2py
+ c2py.cstruct2py.__init__(self)
+ #Initalize packet
+ c2py.structpacker.__init__(self, "!")
+ ##Cached patterns
+ self.patterns={}
+ for (cstructname, cstruct) in self.structs.items():
+ self.patterns[cstructname] = self.get_pattern(cstruct)
+
+ def get_size(self, ctype):
+ """Get size for ctype or name of type.
+ Return None if ctype is not expanded or
+ type with name is not found.
+ """
+ pattern = self.get_pattern(ctype)
+ if (pattern != None):
+ return c2py.cstruct2py.get_size(self,pattern)
+
+ def get_pattern(self,ctype):
+ """Get pattern string for ctype or name of type.
+ Return None if ctype is not expanded or
+ type with name is not found.
+ """
+ if (isinstance(ctype, str)):
+ #Is name
+ return self.patterns[ctype]
+ else:
+ return c2py.cstruct2py.get_pattern(self, ctype)
+
+ def pack(self, ctype, *arg):
+ """Pack packet accordingly ctype or name of type provided.
+ Return struct packed.
+ """
+ if (isinstance(ctype, str)):
+ return struct.pack(self.prefix+self.patterns[ctype], *arg)
+ else:
+ return c2py.structpacker.pack(self, ctype, *arg)
+
+ def peek_from_front(self, ctype, binaryString, returnDictionary=True):
+ """Unpack packet using front of the packet,
+ accordingly ctype or name of ctype provided.
+
+ Return dictionary of values indexed by arg name,
+ if ctype is known struct/type and returnDictionary is True,
+ else return array of data unpacked.
+ """
+ if (isinstance(ctype,str)):
+ data = c2py.structpacker.peek_from_front(self,
+ self.patterns[ctype],
+ binaryString,
+ returnDictionary)
+ return self.data2dic(self.structs[ctype], data)
+ else:
+ return c2py.structpacker.peek_from_front(self,
+ ctype,
+ binaryString,
+ returnDictionary)
+
+ def unpack_from_front(self, ctype, binaryString, returnDictionary=True):
+ """Unpack packet using front of packet,
+ accordingly ctype or name of ctype provided.
+
+ Return (dictionary of values indexed by arg name,
+ remaining binary string) if ctype is known struct/type
+ and returnDictionary is True,
+ else return (array of data unpacked, remaining binary string).
+ """
+ if (isinstance(ctype,str)):
+ (data, remaining) = c2py.structpacker.unpack_from_front(self,
+ self.patterns[ctype],
+ binaryString,
+ returnDictionary)
+ return (self.data2dic(self.structs[ctype], data), remaining)
+ else:
+ return c2py.structpacker.unpack_from_front(self,
+ ctype,
+ binaryString,
+ returnDictionary)
+
+class connection:
+ """Class to hold a connection.
+
+ (C) Copyright Stanford University
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self, messages, sock=None):
+ """Initialize
+ """
+ ##Reference to socket
+ self.sock = sock
+ ##Internal reference to OpenFlow messages
+ self._messages = messages
+ ##Buffer
+ self.buffer = ""
+ ##Header length for OpenFlow
+ self.__header_length = self._messages.get_size("ofp_header")
+
+ def send(self, msg):
+ """Send bare message (given as binary string)
+ """
+ raise NotImplementedError()
+
+ def structsend(self, ctype, *arg):
+ """Build and send message.
+ """
+ self.send(self._messages.pack(ctype, *arg))
+
+ def receive(self, maxlength=1024):
+ """Receive raw in non-blocking way.
+
+ Return buffer
+ """
+ if (select.select([self.sock],[],[],0)[0]):
+ self.buffer += self.sock.recv(maxlength)
+ return self.buffer
+
+ def buffer_has_msg(self):
+ """Check if buffer has a complete message
+ """
+ #Check at least ofp_header is received
+ if (len(self.buffer) < self.__header_length):
+ return False
+ values = self._messages.peek_from_front("ofp_header", self.buffer)
+ return (len(self.buffer) >= values["length"][0])
+
+ def get_msg(self):
+ """Get message from current buffer
+ """
+ if (self.buffer_has_msg()):
+ values = self._messages.peek_from_front("ofp_header", self.buffer)
+ msg = self.buffer[:values["length"][0]]
+ self.buffer = self.buffer[values["length"][0]:]
+ return msg
+ else:
+ return None
+
+ def msgreceive(self, blocking=False, pollInterval=0.001):
+ """Receive OpenFlow message.
+
+ If non-blocking, can return None.
+ """
+ self.receive()
+ if (self.buffer_has_msg()):
+ return self.get_msg()
+ if (blocking):
+ while (not self.buffer_has_msg()):
+ time.sleep(pollInterval)
+ self.receive()
+ return self.get_msg()
+
+class safeconnection(connection):
+ """OpenFlow connection with safety checks
+
+ (C) Copyright Stanford University
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self, messages, sock=None, version=None,
+ xidstart = 0, autoxid=True):
+ """Initialize with OpenFlow version.
+ """
+ connection.__init__(self, messages, sock)
+ ##OpenFlow version
+ if (version != None):
+ self.version = version
+ else:
+ self.version = messages.get_value("OFP_VERSION")
+ ##xid Counter
+ self.nextxid = xidstart
+ ##Automatic xid
+ self.autoxid = autoxid
+        ##Number of messages for which to skip automatic xid
+ self.skipautoxid = 0
+
+ def skip_auto_xid(self, n):
+ """Miss automatic xid for the next n packets
+ """
+ self.skipautoxid = n
+
+ def structsend_xid(self, ctype, *arg):
+ """Build and send message, populating header automatically.
+        Type and xid of the message are not populated.
+ """
+ self.skipautoxid+=1
+ self.structsend(ctype, *arg)
+
+ def structsend(self, ctype, *arg):
+ """Build and send message, populating header automatically.
+ Type of message is not populated
+ """
+ msg = self._messages.pack(ctype, *arg)
+ self.structsend_raw(msg)
+
+ def structsend_raw(self, msg):
+ """Check ofp_header and ensure correctness before sending.
+ """
+ (dic, remaining) = self._messages.unpack_from_front("ofp_header", msg)
+ #Amend header
+ if (self.version != None):
+ dic["version"][0] = self.version
+ if (self.autoxid and (self.skipautoxid == 0)):
+ dic["xid"][0] = self.nextxid
+ self.nextxid+=1
+ if (self.skipautoxid != 0):
+ self.skipautoxid-=1
+ dic["length"][0] = len(remaining)+8
+ #Send message
+ self.send(self._messages.pack("ofp_header",
+ dic["version"][0],
+ dic["type"][0],
+ dic["length"][0],
+ dic["xid"][0])+\
+ remaining)
+
+class tcpsocket(safeconnection):
+ """Class to hold connection
+
+ (C) Copyright Stanford University
+ Date October 2009
+ Created by ykk
+ """
+ def __init__(self, messages, host, port):
+ """Initialize TCP socket to host and port
+ """
+ safeconnection.__init__(self, messages)
+ ##Reference to socket
+ self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.sock.connect((host, port))
+ self.sock.setblocking(False)
+ self.sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 0)
+
+ def __del__(self):
+ """Terminate connection
+ """
+ self.sock.shutdown(1)
+ self.sock.close()
+
+ def send(self, msg):
+ """Send raw message (binary string)
+ """
+ self.sock.sendall(msg)
+
+class connections:
+ """Class to hold multiple connections
+
+ (C) Copyright Stanford University
+ Date November 2009
+ Created by ykk
+ """
+ def __init__(self):
+ """Initialize
+ """
+ ##List of sockets
+ self.__sockets = []
+        ##Dictionary of sockets to connections
+ self.__connections = {}
+
+ def add_connection(self, reference, connect):
+ """Add connection with opaque reference object
+ """
+ if (not isinstance(connect,connection)):
+ raise RuntimeError("Connection must be openflow.connection!")
+ self.__sockets.append(connect.sock)
+ self.__connections[connect.sock] = (reference, connect)
+
+ def receive(self, maxlength=1024):
+ """Receive raw in non-blocking way
+ """
+ read_ready = select.select(self.__sockets,[],[],0)[0]
+ for sock in read_ready:
+ self.__connections[sock][1].receive(maxlength)
+
+ def has_msg(self):
+ """Check if any of the connections has a message
+
+ Return (reference,connection) with message
+ """
+ for sock, refconnect in self.__connections.items():
+ if (refconnect[1].buffer_has_msg()):
+ return refconnect
+ return None
+
+ def msgreceive(self, blocking=False, pollInterval=0.001):
+ """Receive OpenFlow message.
+
+ If non-blocking, can return None.
+ """
+ self.receive()
+ c = self.has_msg()
+ if (c != None):
+ return (c[0],c[1].get_msg())
+ if (blocking):
+ while (c == None):
+ time.sleep(pollInterval)
+ self.receive()
+ c = self.has_msg()
+ else:
+ return (None, None)
+ return (c[0],c[1].get_msg())
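A sketch of the lowest-level flow: parse openflow.h, connect to a controller, and exchange a hello, letting safeconnection fill in version, length and xid (the controller address is hypothetical):

    import openflow

    ofmsg = openflow.messages()                 # or openflow.messages("path/to/openflow.h")
    con = openflow.tcpsocket(ofmsg, "127.0.0.1", 6633)
    con.structsend("ofp_hello",
                   0, ofmsg.get_value("OFPT_HELLO"), 0, 0)
    reply = con.msgreceive(blocking=True)
    print ofmsg.peek_from_front("ofp_header", reply)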
diff --git a/tools/pylibopenflow/pylib/output.py b/tools/pylibopenflow/pylib/output.py
new file mode 100644
index 0000000..64df4f5
--- /dev/null
+++ b/tools/pylibopenflow/pylib/output.py
@@ -0,0 +1,85 @@
+"""This module implements output printing.
+
+Output is divided into 4 levels and
+can be configured for different verbosity.
+
+Copyright(C) 2009, Stanford University
+Date August 2009
+Created by ykk
+"""
+
+##Various output modes
+MODE = {}
+MODE["ERR"] = 0
+MODE["WARN"] = 1
+MODE["INFO"] = 2
+MODE["DBG"] = 3
+
+#Global mode
+global output_mode
+output_mode = None
+
+def set_mode(msg_mode, who=None):
+ """Set the message mode for who
+ If who is None, set global mode
+ """
+ global output_mode
+ if (output_mode == None):
+ output_mode = {}
+ output_mode["global"] = MODE["WARN"]
+ output_mode["DBG"] = []
+ output_mode["INFO"] = []
+ output_mode["WARN"] = []
+
+ #Set global mode
+ if (who == None):
+ output_mode["global"] = MODE[msg_mode]
+ return
+
+ #Individual mode
+ if (msg_mode == "ERR"):
+ return
+ for mode in ["WARN","INFO","DBG"]:
+        if (not (who in output_mode[mode])):
+            output_mode[mode].append(who)
+ if (msg_mode == mode):
+ return
+
+def output(msg_mode, msg, who=None):
+ """Print message
+ """
+ global output_mode
+ if (output_mode == None):
+        raise RuntimeError("Output mode is not set")
+
+ #Indicate who string
+ if (who == None):
+ whostr = ""
+ else:
+ whostr = who+":"
+
+ #Print output
+ if (MODE[msg_mode] <= output_mode["global"]):
+ print msg_mode.ljust(4, ' ')+"|"+whostr+msg
+ elif (who in output_mode[msg_mode]):
+ print msg_mode.ljust(4, ' ')+"|"+whostr+msg
+
+def err(msg, who=None):
+ """Print error messages
+ """
+ output("ERR", msg, who)
+
+def warn(msg, who=None):
+ """Print warning messages
+ """
+ output("WARN", msg, who)
+
+def info(msg, who=None):
+ """Print informational messages
+ """
+ output("INFO", msg, who)
+
+def dbg(msg, who=None):
+ """Print debug messages
+ """
+ output("DBG", msg, who)