Async/streaming gRPC client/server proto

This experiment was to fine-tune how we can implement
async gRPC client and server code inside a Twisted
python app.

Change-Id: I945014e27f4b9d6ed624666e0284cc298548adb3

Major cleanup of openflow_13.proto

Change-Id: I4e54eaf87b682124ec518a0ade1a6050a6ec6da8

Relocated openflow_13.proto to voltha

Change-Id: I66ae45a9142d180c2c6651e75c7a1ee08aef7ef8

Removed forced utest from make build

Change-Id: If0da58e9d135ebde6ca68c3316688a03a7b10f2f

twisted openflow agent first pass

Change-Id: Ibe5b4727ccfe92e6fd464ccd3baf6275569ef5d3

store openflow derived files

Change-Id: Ib3e1384bb2ca2a9c0872767f7b793f96b0a154e2

Minor cleanup

Change-Id: I1280ed3acb606121b616a0efd573f5f59d010dca

Factored out common utils

Change-Id: Icd86fcd50f60d0900924674cbcd65e13e47782a1

Refactored twisted agent

Change-Id: I71f26ce5357a4f98477df60b8c5ddc068cf75d43

Relocated openflow agent to ofagent

... and preserved obsolete working (non-twisted) agent under
~/obsolete, so we can still run the olt-oftest and pass tests,
until the new twisted-based agent reaches that maturity point.

Change-Id: I727f8d7144b1291a40276dad2966b7643bd7bc4b

olt-oftest in fake mode works with new agent

Change-Id: I43b4f5812e8dfaa9f45e4a77fdcf6c30ac520f8d

Initial ofagent/voltha operation

Change-Id: Ia8104f1285a6b1c51635d36d7d78fc113f800e79

Additional callouts to Voltha

Change-Id: If8f483d5140d3c9d45f22b480b8d33249a29cd4e

More gRPC calls

Change-Id: I7d24fadf9425217fb26ffe18f25359d072ef38fa

Flow add/list now works

Change-Id: Ie3e3e73108645b47891cef798fc61372a022fd93

Missed some files

Change-Id: I29e81238ff1a26c095c0c73e521579edf7092e21
diff --git a/obsolete/Makefile b/obsolete/Makefile
new file mode 100644
index 0000000..e0854b1
--- /dev/null
+++ b/obsolete/Makefile
@@ -0,0 +1,26 @@
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ifeq ($(VOLTHA_BASE)_set,_set)
+$(error If your getting this error, please type ". ./env.sh" at the top directory)
+endif
+
+include $(VOLTHA_BASE)/setup.mk
+
+PYTHONFILES:=$(wildcard *.py)
+
+flake8: 
+	$(FLAKE8) $(PYTHONFILES)
diff --git a/obsolete/README.md b/obsolete/README.md
new file mode 100644
index 0000000..2a185b9
--- /dev/null
+++ b/obsolete/README.md
@@ -0,0 +1,15 @@
+
+To get this agent to work with the ONOS olt-test, the following
+command was used in the shell to launch the agent.
+
+NOTE: This command should soon be eliminated as the agent should
+be started by VOLTHA.
+
+```
+$ cd <LOCATION_OF_VOLTHA>
+$ sudo -s
+# . ./env.sh
+# cd <LOCATION_OF_VOLTHA>/voltha/northbound/openflow
+(venv-linux) # python agent/main.py -v --in-out-iface=enp1s0f0 --in-out-stag=4004
+```
+
diff --git a/obsolete/agent.py b/obsolete/agent.py
new file mode 100644
index 0000000..a0a14fc
--- /dev/null
+++ b/obsolete/agent.py
@@ -0,0 +1,280 @@
+import logging
+import loxi.of13 as ofp
+import socket
+import sys
+import time
+
+from loxi.connection import Connection
+from ofagent.utils import pp
+
+
+class Agent(object):
+
+    def __init__(self, controller, datapath_id,
+                 store, backend, retry_interval=1):
+        self.ip = controller.split(':')[0]
+        self.port = int(controller.split(':')[1])
+        self.datapath_id = datapath_id
+        self.store = store
+        self.backend = backend
+        self.exiting = False
+        self.retry_interval = retry_interval
+        self.cxn = None
+        self.soc = None
+
+    def run(self):
+        self.connect()
+
+    def connect(self):
+        """
+        Connect to a controller
+        """
+        while not self.exiting:
+            self.cxn = None
+            self.soc = soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            try:
+                soc.connect((self.ip, self.port))
+            except socket.error, e:
+                logging.info(
+                    "Cannot connect to controller (errno=%d), "
+                    "retrying in %s secs" %
+                    (e.errno, self.retry_interval))
+            else:
+                logging.info("Connected to controller")
+                soc.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
+                self.cxn = cxn = Connection(self.soc)
+                cxn.daemon = False
+                cxn.start()
+                try:
+                    self.handle_protocol()
+                except Exception, e:
+                    logging.info(
+                        "Connection was lost (%s), will retry in %s secs" %
+                        (e, self.retry_interval))
+            time.sleep(self.retry_interval)
+
+    def stop(self):
+        if self.cxn is not None:
+            self.cxn.stop()
+        if self.soc is not None:
+            self.soc.close()
+
+    def signal_flow_mod_error(self, code, data):
+        msg = ofp.message.flow_mod_failed_error_msg(code=code, data=data)
+        self.cxn.send(msg)
+
+    def signal_group_mod_error(self, code, data):
+        msg = ofp.message.group_mod_failed_error_msg(code=code, data=data)
+        self.cxn.send(msg)
+
+    def signal_flow_removal(self, flow):
+        assert isinstance(flow, ofp.common.flow_stats_entry)
+        msg = ofp.message.flow_removed(
+            cookie=flow.cookie,
+            priority=flow.priority,
+            reason=None, # TODO
+            table_id=flow.table_id,
+            duration_sec=flow.duration_sec,
+            duration_nsec=flow.duration_nsec,
+            idle_timeout=flow.idle_timeout,
+            hard_timeout=flow.hard_timeout,
+            packet_count=flow.packet_count,
+            byte_count=flow.byte_count,
+            match=flow.match)
+        self.cxn.send(msg)
+
+    def send_packet_in(self, data, in_port):
+        match = ofp.match()
+        match.oxm_list.append(ofp.oxm.in_port(in_port))
+        msg = ofp.message.packet_in(
+            reason=ofp.OFPR_ACTION,
+            match=match,
+            data=data)
+        self.cxn.send(msg)
+
+    def handle_protocol(self):
+
+        cxn = self.cxn
+
+        # Send initial hello
+        cxn.send(ofp.message.hello())
+
+        if not cxn.recv(lambda msg: msg.type == ofp.OFPT_HELLO):
+            raise Exception("Did not receive initial HELLO")
+
+        while True:
+
+            try:
+                req = cxn.recv(lambda msg: True)
+            except AssertionError, e:
+                raise Exception("Connection is no longer alive")
+
+            print(pp(req))
+
+            if req is None:
+                # this simply means we timed out
+                # later we can use this to do other stuff
+                # for now we simply ignore this and loop back
+                pass
+
+            elif req.type == ofp.OFPT_FEATURES_REQUEST:
+                msg = ofp.message.features_reply(
+                    xid=req.xid,
+                    datapath_id=self.datapath_id,
+                    n_buffers=256,
+                    n_tables=2,
+                    capabilities= (
+                          ofp.OFPC_FLOW_STATS
+                        | ofp.OFPC_TABLE_STATS
+                        | ofp.OFPC_PORT_STATS
+                        | ofp.OFPC_GROUP_STATS
+                    )
+                )
+                cxn.send(msg)
+
+            elif req.type == ofp.OFPT_STATS_REQUEST:
+
+                if req.stats_type == ofp.OFPST_PORT_DESC:
+                    # port stats request
+                    msg = ofp.message.port_desc_stats_reply(
+                        xid=req.xid,
+                        #flags=None,
+                        entries=self.store.port_list())
+                    cxn.send(msg)
+
+                elif req.stats_type == ofp.OFPST_DESC:
+                    # device description
+                    msg = ofp.message.desc_stats_reply(
+                        xid=req.xid,
+                        flags=None,
+                        mfr_desc=self.backend.mfr_desc,
+                        hw_desc=self.backend.hw_desc,
+                        sw_desc="pyofagent",
+                        serial_num=self.backend.get_serial_num(),
+                        dp_desc=self.backend.get_dp_desc())
+                    cxn.send(msg)
+
+                elif req.stats_type == ofp.OFPST_FLOW:
+                    # flow stats requested
+                    msg = ofp.message.flow_stats_reply(
+                        xid=req.xid, entries=self.store.flow_list())
+                    cxn.send(msg)
+
+                elif req.stats_type == ofp.OFPST_TABLE:
+                    # table stats requested
+                    msg = ofp.message.table_stats_reply(
+                        xid=req.xid, entries=self.store.table_stats())
+                    cxn.send(msg)
+
+                elif req.stats_type == ofp.OFPST_PORT:
+                    # port list
+                    msg = ofp.message.port_stats_reply(
+                        xid=req.xid, entries=self.store.port_stats())
+                    cxn.send(msg)
+
+                elif req.stats_type == ofp.OFPST_GROUP:
+                    msg = ofp.message.group_stats_reply(
+                        xid=req.xid, entries=self.store.group_stats())
+                    cxn.send(msg)
+
+                elif req.stats_type == ofp.OFPST_GROUP_DESC:
+                    msg = ofp.message.group_desc_stats_reply(
+                        xid=req.xid, entries=self.store.group_list())
+                    cxn.send(msg)
+
+                elif req.stats_type == ofp.OFPST_METER:
+                    msg = ofp.message.meter_stats_reply(
+                        xid=req.xid, entries=[])
+                    cxn.send(msg)
+
+                else:
+                    logging.error("Unhandled stats type: %d in request:"
+                                  % req.stats_type)
+                    logging.error(pp(req))
+
+            elif req.type == ofp.OFPT_SET_CONFIG:
+                # TODO ignored for now
+                pass
+
+            elif req.type == ofp.OFPT_BARRIER_REQUEST:
+                # TODO this will be the place to commit all changes before
+                # replying
+                # but now we send a reply right away
+                msg = ofp.message.barrier_reply(xid=req.xid)
+                cxn.send(msg)
+
+            elif req.type == ofp.OFPT_GET_CONFIG_REQUEST:
+                # send back configuration reply
+                msg = ofp.message.get_config_reply(
+                    xid=req.xid, miss_send_len=ofp.OFPCML_NO_BUFFER)
+                cxn.send(msg)
+
+            elif req.type == ofp.OFPT_ROLE_REQUEST:
+                # TODO this is where we shall manage which connection is active
+                # now we simply verify that the role request is for active and
+                # reply
+                if req.role != ofp.OFPCR_ROLE_MASTER:
+                    self.stop()
+                    sys.exit(1)
+                msg = ofp.message.role_reply(
+                    xid=req.xid, role=req.role,
+                    generation_id=req.generation_id)
+                cxn.send(msg)
+
+            elif req.type == ofp.OFPT_PACKET_OUT:
+                in_port = req.in_port
+                data = req.data
+                for action in req.actions:
+                    if action.type == ofp.OFPAT_OUTPUT:
+                        port = action.port
+                        self.backend.packet_out(in_port, port, data)
+                    else:
+                        logging.warn("Unhandled packet out action type %s"
+                                     % action.type)
+
+            elif req.type == ofp.OFPT_FLOW_MOD:
+
+                command = req._command
+
+                if command == ofp.OFPFC_ADD:
+                    self.store.flow_add(req)
+
+                elif command == ofp.OFPFC_DELETE:
+                    self.store.flow_delete(req)
+
+                elif command == ofp.OFPFC_DELETE_STRICT:
+                    self.store.flow_delete_strict(req)
+
+                elif command == ofp.OFPFC_MODIFY:
+                    self.store.flow_modify(req)
+
+                elif command == ofp.OFPFC_MODIFY_STRICT:
+                    self.store.flow_modify_strict(req)
+
+                else:
+                    logging.warn("Unhandled flow mod command %s in message:"
+                                 % command)
+                    logging.warn(pp(req))
+
+            elif req.type == ofp.OFPT_GROUP_MOD:
+
+                command = req.command
+
+                if command == ofp.OFPGC_DELETE:
+                    self.store.group_delete(req)
+
+                elif command == ofp.OFPGC_ADD:
+                    self.store.group_add(req)
+
+                elif command == ofp.OFPGC_MODIFY:
+                    self.store.group_modify(req)
+
+                else:
+                    logging.warn("Unhandled group command %s in message:"
+                                 % command)
+                    logging.warn(pp(req))
+
+            else:
+                logging.warn("Unhandled message from controller:")
+                logging.warn(pp(req))
+
diff --git a/obsolete/backends/__init__.py b/obsolete/backends/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/obsolete/backends/__init__.py
diff --git a/obsolete/backends/mock.py b/obsolete/backends/mock.py
new file mode 100644
index 0000000..75c9c5a
--- /dev/null
+++ b/obsolete/backends/mock.py
@@ -0,0 +1,195 @@
+"""
+Mock backend for testing purposes
+"""
+
+import logging
+import os
+import sys
+from threading import Thread
+
+from hexdump import hexdump
+from scapy.all import Ether, IP, UDP, Dot1Q, sendp, sniff
+
+# VERY IMPORTANT:
+# Without the below hack, scapy will not properly receive VLAN
+# header (see http://stackoverflow.com/questions/18994242/why-isnt-scapy-capturing-vlan-tag-information).
+#
+from scapy.all import conf, ETH_P_ALL
+import pcap
+conf.use_pcap = True
+import scapy.arch.pcapdnet
+assert conf.L2listen.__name__=='L2pcapListenSocket'
+
+sys.path.insert(1, os.path.join(sys.path[0], '..'))
+
+import loxi.of13 as ofp
+from ofagent.utils import mac_str_to_tuple
+
+
+class MockBackend(object):
+
+    mfr_desc = "Ciena Corp."
+    hw_desc = "mock"
+
+    def __init__(self, store, in_out_iface=None, in_out_stag=None):
+        self.store = store
+        self.add_some_ports()
+        self.in_out_iface = in_out_iface
+        self.in_out_stag = in_out_stag
+        self.agent = None
+        self.in_out_receiver = None
+
+    def set_agent(self, agent):
+        self.agent = agent
+        if self.in_out_iface is not None:
+            self.in_out_receiver = InOutReceiver(self.in_out_iface, agent, self.in_out_stag)
+            self.in_out_receiver.start()
+
+    def stop(self):
+        if self.in_out_receiver is not None:
+            self.in_out_receiver.stop()
+
+    def get_serial_num(self):
+        return "DFG-4567-RTYU-789"
+
+    def get_dp_desc(self):
+        return "mock device"
+
+    def add_some_ports(self):
+        cap = ofp.OFPPF_1GB_FD | ofp.OFPPF_FIBER
+        for pno, mac, nam, cur, adv, sup, spe in (
+                (  1, '00:00:00:00:00:01', 'onu1', cap, cap, cap, ofp.OFPPF_1GB_FD),
+                (  2, '00:00:00:00:00:02', 'onu2', cap, cap, cap, ofp.OFPPF_1GB_FD),
+                (129, '00:00:00:00:00:81', 'olt',  cap, cap, cap, ofp.OFPPF_1GB_FD)
+            ):
+            port = ofp.common.port_desc(pno, mac_str_to_tuple(mac), nam,
+                                        curr=cur, advertised=adv, supported=sup,
+                                        curr_speed=spe, max_speed=spe)
+            self.store.port_add(port)
+
+    def packet_out(self, in_port, out_port, data):
+        in_port = "CONTROLLER" if in_port == ofp.OFPP_CONTROLLER else in_port
+        print "PACKET OUT (%s => %s): " % (in_port, out_port)
+        hexdump(data)
+
+        if self.in_out_iface is not None:
+
+            try:
+                # disect the packet
+                pkt = Ether(data)
+
+                # remove payload from Ether frame
+                payload = pkt.payload
+                payload_type = pkt.type
+                pkt.remove_payload()
+
+                # insert Dot1Q shim with vlan_id = out_port
+
+                if self.in_out_stag is None:
+                    ## WARNING -- This was changed from 0x88a8 to 0x8100 when
+                    ## testing with the Intel XL710 quad 10GE boards.  The
+                    ## XL710 does not support the TPID for the STAG.
+                    ##
+                    ## Long term, it should be changed back to 0x88a8!
+                    ##
+                    pkt.type = 0x8100
+                    new_pkt = pkt / Dot1Q(vlan=out_port, type=payload_type) / payload
+
+                else:
+                    pkt.type = 0x8100
+                    new_pkt = (
+                            pkt /
+                            Dot1Q(vlan=self.in_out_stag, type=0x8100) /
+                            Dot1Q(vlan=out_port, type=payload_type) /
+                            payload)
+
+                # send out the packet
+                sendp(new_pkt, iface=self.in_out_iface)
+
+            except Exception, e:
+                logging.exception("Could not parse packet-out data as scapy.Ether:\n")
+                logging.error(hexdump(data, 'return'))
+
+
+class InOutReceiver(Thread):
+
+    def __init__(self, iface, agent, in_out_stag=None):
+        Thread.__init__(self)
+        self.iface = iface
+        self.finished = False
+        self.agent = agent
+        self.in_out_stag = in_out_stag
+
+    def run(self):
+        # TODO this loop could be reconciled with the ofp Connection to become a
+        # single select loop.
+        self.sock = s = conf.L2listen(
+            type=ETH_P_ALL,
+            iface=self.iface,
+            filter='inbound'
+        )
+        while not self.finished:
+            try:
+                sniffed = sniff(1, iface=self.iface, timeout=1, opened_socket=s)
+                print 'Sniffer received %d packet(s)' % len(sniffed)
+                for pkt in sniffed:
+                    self.forward_packet(pkt)
+
+            except Exception, e:
+                logging.error("scapy.sniff error: %s" % e)
+
+    def stop(self):
+        """
+        Signal the thread to exit and wait for it
+        """
+        assert not self.finished
+        logging.debug("Stop sniffing on in-out channel")
+        self.finished = True
+        self.sock.close()
+        self.join()
+
+    def forward_packet(self, pkt):
+        print "Received packet:"
+        hexdump(str(pkt))
+        pkt.show()
+
+        try:
+            assert isinstance(pkt, Ether)
+            assert isinstance(pkt.getlayer(1), Dot1Q)
+            dot1q = pkt.getlayer(1)
+            assert isinstance(dot1q, Dot1Q)
+
+            if self.in_out_stag is None:
+                payload = dot1q.payload
+                payload_type = dot1q.type
+                pkt.remove_payload()
+
+                pkt.type = payload_type
+                new_pkt = pkt / payload
+                in_port = dot1q.vlan
+
+            else:
+                if dot1q.vlan != self.in_out_stag:
+                    print 'Dropping packet because outer tag %d does not match %d' % (
+                        dot1q.vlan, self.in_out_stag)
+                    return
+                dot1q_inner = dot1q.getlayer(1)
+                assert isinstance(dot1q_inner, Dot1Q)
+                payload = dot1q_inner.payload
+                payload_type = dot1q_inner.type
+                pkt.remove_payload()
+
+                pkt.type = payload_type
+                new_pkt = pkt / payload
+                in_port = dot1q_inner.vlan
+
+            if self.agent is not None:
+                self.agent.send_packet_in(str(new_pkt), in_port=in_port)
+                print 'new packet forwarded to controller (with in_port=%d):' % in_port
+                new_pkt.show()
+
+        except Exception, e:
+            logging.exception("Unexpected packet format received by InOutReceiver: %s" % e)
+            logging.error(hexdump(str(pkt), 'return'))
+
+
diff --git a/obsolete/loxi b/obsolete/loxi
new file mode 120000
index 0000000..1badc33
--- /dev/null
+++ b/obsolete/loxi
@@ -0,0 +1 @@
+../ofagent/loxi
\ No newline at end of file
diff --git a/obsolete/main.py b/obsolete/main.py
new file mode 100644
index 0000000..f404df5
--- /dev/null
+++ b/obsolete/main.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+import logging
+
+from argparse import ArgumentParser
+from store import ObjectStore
+from backends.mock import MockBackend
+from agent import Agent
+
+
+def parse_options():
+    parser = ArgumentParser("pyofagent - Python-based Open Flow Agent")
+    parser.add_argument("-c", "--controller", #dest="controller",
+                        help="Controller host:port to connect to", metavar="HOST:PORT",
+                        default="localhost:6633")
+    parser.add_argument("-d", "--devid", dest="datapath_id",
+                        help="Device identified", metavar="DEVID",
+                        default=42)
+    parser.add_argument("-v", "--verbose", action='store_true', #dest=verbose,
+                        default="enable verbose logging (log-level is DEBUG)")
+    parser.add_argument("-I", "--in-out-iface", metavar="IN-OUT-IFACE",
+                        help="Local interface to receve/send in-out frames",)
+    parser.add_argument("-S", "--in-out-stag", metavar="IN-OUT-STAG",
+                        help="Expect/Apply given s-tag when receiving/sending frames"+
+                             "at the in-out interface")
+    return parser.parse_args()
+
+
+def main():
+
+    args = parse_options()
+
+    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
+    store = ObjectStore()
+    backend = MockBackend(store, in_out_iface=args.in_out_iface,
+                                 in_out_stag=None if args.in_out_stag is None else int(args.in_out_stag))
+    agent = Agent(args.controller, int(args.datapath_id), store, backend)
+    store.set_agent(agent)
+    backend.set_agent(agent)
+
+    try:
+        agent.run()
+    except KeyboardInterrupt:
+        logging.info("Ctrl-c received! Shutting down connection and exiting...")
+        agent.stop()
+        backend.stop()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/obsolete/oftest/Makefile b/obsolete/oftest/Makefile
new file mode 100644
index 0000000..e0854b1
--- /dev/null
+++ b/obsolete/oftest/Makefile
@@ -0,0 +1,26 @@
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ifeq ($(VOLTHA_BASE)_set,_set)
+$(error If your getting this error, please type ". ./env.sh" at the top directory)
+endif
+
+include $(VOLTHA_BASE)/setup.mk
+
+PYTHONFILES:=$(wildcard *.py)
+
+flake8: 
+	$(FLAKE8) $(PYTHONFILES)
diff --git a/obsolete/oftest/README.md b/obsolete/oftest/README.md
new file mode 100644
index 0000000..c8a6b79
--- /dev/null
+++ b/obsolete/oftest/README.md
@@ -0,0 +1,52 @@
+## oftest test-cases to test the pyofagent
+
+The main purpose of these tests is to verify correct packet-in and packet-out behavior of
+pyofagent. Pyofagent can send and receive raw packets at a local interface. The interface
+can be configured with the --in-out-iface=<link name> command line option. We will refer
+to this interface below as the "in-out" interface.
+
+Incoming frames arriving at the in-out interface are assumed to be single-tagged with a VLAN
+id that represents a logical port number. Pyofagent will forward such frames to the controller
+as OpenFlow "packet-in" messages, accompanied with the extracted port number as in_port
+metadata.
+
+Conversely, when pyofagent receives an OpenFlow "packet-out" message from the controller, it
+will encapsualte the frame in an VLAN header with a VLAN id representing the out_port. If the
+out_port is a list, pyofagent will replicate the frame and send it out once at each specified
+port (that is, one with each of the specified VLAN ids).
+
+### Setup required for the test:
+
+You must have oftest checked out in a directory at the same level as the pyofagent root directory.
+For instance, if pyofagent is under ~/olt, then oftest must be checked out also under ~/olt:
+
+```
+cd ~/olt
+git clone git@bitbucket.org:corddesign/pyofagent.git
+```
+
+In order to run these tests, we need to create a veth port pair on the host:
+
+```
+sudo ip link add ep1 type veth peer name ep2
+sudo ifconfig ep1 up
+sudo ifconfig ep2 up
+```
+
+To start pyofagent in the in-out mode, start it with the --in-out-iface option, such as:
+
+```
+cd ~/olt
+sudo python pyofagent/pyofagent/main.py -v --in-out-iface=ep2
+```
+
+Now we can run the tests:
+
+```
+cd ~/olt
+sudo ./oftest/oft --test-dir=pyofagent/oftest/ -i 1@ep1 --port 6633 -V 1.3 \
+        --debug=verbose -t "in_out_port=1"
+```
+
+There are currently two tests, they should both pass. This proves that the packet in/out behavior of pyofagent is healthy.
+
diff --git a/obsolete/oftest/agenttest.py b/obsolete/oftest/agenttest.py
new file mode 100644
index 0000000..f22e9f0
--- /dev/null
+++ b/obsolete/oftest/agenttest.py
@@ -0,0 +1,84 @@
+import logging
+from oftest.testutils import *
+import oftest.base_tests as base_tests
+import ofp
+from scapy.all import Ether
+from hexdump import hexdump
+
+
+in_out_port = test_param_get("in_out_port", "ep1")
+
+
+class PacketOutTest(base_tests.SimpleDataPlane):
+    """
+    Test that a packet sent out by the controller is forwarded to the in-out
+    test port with the out_port encoded as a Dot1Q shim vlan_id
+    """
+
+    def runTest(self):
+        logging.info("Running %s" % self.__class__.__name__)
+
+        # These cleanups are not really needed. We do it to verify connection
+        # to the agent.
+        delete_all_flows(self.controller)
+        delete_all_groups(self.controller)
+
+        # Send packet out and capture it at the in-out port
+        pktlen = 60
+        pkt_kws = dict(
+            eth_src='de:ad:be:ef:00:01',
+            eth_dst='de:ad:be:ef:00:02',
+            ip_src='192.168.0.1',
+            ip_dst='192.168.0.2'
+        )
+
+        pkt = simple_udp_packet(pktlen=pktlen, **pkt_kws)
+        expected_pkt = simple_udp_packet(pktlen + 4, dl_vlan_enable=True,
+                                         vlan_vid=1, **pkt_kws)
+
+        msg = ofp.message.packet_out(
+            in_port=ofp.OFPP_CONTROLLER,
+            actions=[ofp.action.output(port=in_out_port)],
+            buffer_id=ofp.OFP_NO_BUFFER,
+            data=str(pkt)
+        )
+        self.controller.message_send(msg)
+        verify_no_errors(self.controller)
+
+        # now verify that we received the correct packet with proper vlan tag
+        verify_packet(self, str(expected_pkt), in_out_port)
+
+
+class PacketInTest(base_tests.SimpleDataPlane):
+    """
+    Test that a packet arriving at the in-out test port is forwarded to
+    the controller as a packet-in message with in_port being the vlan_id
+    of the outer Dot1Q shim (which is popped before the packet is sent in)
+    """
+
+    def runTest(self):
+        logging.info("Running %s" % self.__class__.__name__)
+
+        # These cleanups are not really needed. We do it to verify connection
+        # to the agent.
+        delete_all_flows(self.controller)
+        delete_all_groups(self.controller)
+
+        # Send packet out and capture it at the in-out port
+        pktlen = 60
+        pkt_kws = dict(
+            eth_src='de:ad:be:ef:00:01',
+            eth_dst='de:ad:be:ef:00:02',
+            ip_src='192.168.0.1',
+            ip_dst='192.168.0.2'
+        )
+
+        pkt = simple_udp_packet(pktlen + 4, dl_vlan_enable=True,
+                                vlan_vid=1, **pkt_kws)
+        expected_pkt = simple_udp_packet(pktlen=pktlen, **pkt_kws)
+
+        # send a test packet into the in_out_port
+        self.dataplane.send(in_out_port, str(pkt))
+
+        # expect it to become a packet_in
+        verify_packet_in(self, str(expected_pkt), 1, ofp.OFPR_ACTION)
diff --git a/obsolete/store.py b/obsolete/store.py
new file mode 100644
index 0000000..0bd04d6
--- /dev/null
+++ b/obsolete/store.py
@@ -0,0 +1,335 @@
+"""
+Internal state of the device
+"""
+
+import logging
+from ofagent.utils import pp
+import loxi.of13 as ofp
+
+
class GroupEntry(object):
    """Pairs a group's static definition with its runtime statistics.

    Instances are what ObjectStore keeps in its group table, keyed by
    group_id.
    """

    def __init__(self, group_desc, group_stats):
        # Guard against callers passing raw group_mod messages instead of
        # the derived desc/stats entry objects.
        assert isinstance(group_desc, ofp.group_desc_stats_entry)
        assert isinstance(group_stats, ofp.group_stats_entry)
        self.group_desc = group_desc    # ofp.group_desc_stats_entry
        self.group_stats = group_stats  # ofp.group_stats_entry
+
+
def flow_stats_entry_from_flow_mod_message(fmm):
    """Derive an ofp.flow_stats_entry from a flow_mod message.

    All flow_mod fields that have a flow_stats counterpart (table_id,
    priority, flags, cookie, match, instructions, ...) are carried over;
    counters and durations start at zero.
    """
    assert isinstance(fmm, ofp.message.flow_mod)

    # Copy the attribute dict: the original code aliased fmm.__dict__
    # directly, so the del-loop below mutated the caller's message object
    # (stripping xid, cookie_mask, etc. from it).
    kw = fmm.__dict__.copy()

    # drop the fields that exist only on the flow_mod message
    for k in ('xid', 'cookie_mask', 'out_port', 'buffer_id', 'out_group'):
        del kw[k]

    flow = ofp.flow_stats_entry(
        duration_sec=0,
        duration_nsec=0,
        packet_count=0,
        byte_count=0,
        **kw
    )
    return flow
+
def group_entry_from_group_mod_message(gmm):
    """Derive a GroupEntry (desc + zeroed stats) from a group_mod message.

    The entry objects are built directly from the message attributes, so
    the message itself is left untouched.  (The original implementation
    deleted 'xid' from gmm.__dict__ in place — mutating the caller's
    message — and then never used the resulting dict at all.)
    """
    assert isinstance(gmm, ofp.message.group_mod)

    group_desc = ofp.group_desc_stats_entry(
        group_type=gmm.group_type,
        group_id=gmm.group_id,
        buckets=gmm.buckets
    )

    # stats start empty; counters are updated elsewhere
    group_stats = ofp.group_stats_entry(
        group_id=gmm.group_id
    )

    return GroupEntry(group_desc, group_stats)
+
+
class ObjectStore(object):
    """
    In-memory mirror of the device's OpenFlow state: ports, flows and
    groups, plus handlers implementing the table semantics of the
    corresponding flow_mod/group_mod messages.

    An agent may be registered via set_agent() so that errors and flow
    removal notifications can be forwarded back to the controller; with
    no agent set, signals are silently dropped.
    """

    def __init__(self):
        self.ports = []   # list of ofp.common.port_desc
        self.flows = []   # list of ofp.common.flow_stats_entry
        self.groups = {}  # dict of GroupEntry objects (group_desc +
                          # group_stats), keyed by the group_id field
        self.agent = None  # set via set_agent(); may remain None

    def set_agent(self, agent):
        """Set agent reference"""
        self.agent = agent

    def signal_flow_mod_error(self, code, flow_mod):
        """Forward a flow_mod error to the agent (no-op without an agent)"""
        if self.agent is not None:
            # fixed: previously referenced the undefined global 'agent'
            self.agent.signal_flow_mod_error(code, flow_mod)

    def signal_flow_removal(self, flow):
        """Forward a flow removal notification to the agent (no-op without
        an agent)"""
        if self.agent is not None:
            # fixed: previously referenced the undefined global 'agent'
            self.agent.signal_flow_removal(flow)

    def signal_group_mod_error(self, code, group_mod):
        """Forward a group_mod error to the agent (no-op without an agent)"""
        if self.agent is not None:
            # fixed: previously referenced the undefined global 'agent'
            self.agent.signal_group_mod_error(code, group_mod)

    ## <=========================== PORT HANDLERS ==================================>

    def port_list(self):
        """Return the (live) list of known ports"""
        return self.ports

    def port_add(self, port):
        """Register a new port descriptor"""
        self.ports.append(port)

    def port_stats(self):
        logging.warning("port_stats() not yet implemented")
        return []

    ## <=========================== FLOW HANDLERS ==================================>

    def flow_add(self, flow_add):
        """Add (or replace) a flow entry based on a flow_add message"""
        assert isinstance(flow_add, ofp.message.flow_add)
        assert flow_add.cookie_mask == 0

        check_overlap = flow_add.flags & ofp.OFPFF_CHECK_OVERLAP
        if check_overlap:
            # NOTE: _flow_find_overlapping_flows() is not implemented yet
            # and always returns None, so this branch currently never
            # reports an overlap.
            if self._flow_find_overlapping_flows(flow_add, return_on_first_hit=True):
                self.signal_flow_mod_error(ofp.OFPFMFC_OVERLAP, flow_add)
            else:
                # free to add as new flow
                flow = flow_stats_entry_from_flow_mod_message(flow_add)
                self.flows.append(flow)

        else:
            flow = flow_stats_entry_from_flow_mod_message(flow_add)
            idx = self._flow_find(flow)
            if idx >= 0:
                # replace an existing identical entry, carrying the old
                # counters over unless the flow_mod asked for a reset
                old_flow = self.flows[idx]
                assert isinstance(old_flow, ofp.common.flow_stats_entry)
                if not (flow_add.flags & ofp.OFPFF_RESET_COUNTS):
                    flow.byte_count = old_flow.byte_count
                    flow.packet_count = old_flow.packet_count
                self.flows[idx] = flow

            else:
                self.flows.append(flow)

    def flow_delete_strict(self, flow_delete_strict):
        """Delete the single flow that exactly matches the delete spec"""
        assert isinstance(flow_delete_strict, ofp.message.flow_delete_strict)
        flow = flow_stats_entry_from_flow_mod_message(flow_delete_strict)
        idx = self._flow_find(flow)
        if idx >= 0:
            del self.flows[idx]
            logging.info("flow removed:\n%s" % pp(flow))
        else:
            logging.error('Requesting strict delete of:\n%s\nwhen flow table is:\n\n' % pp(flow))
            for f in self.flows:
                # was a bare Python 2 'print' statement; use logging for
                # consistency with the rest of the error report
                logging.error(pp(f))

    def flow_delete(self, flow_delete):
        """Delete all flows covered by the (wildcarded) delete spec"""
        assert isinstance(flow_delete, ofp.message.flow_delete)

        # build a list of what to keep vs what to delete
        to_keep = []
        to_delete = []
        for f in self.flows:
            list_to_append = to_delete if self._flow_matches_spec(f, flow_delete) else to_keep
            list_to_append.append(f)

        # replace flow table with keepers
        self.flows = to_keep

        # send notifications for discarded flows as required by OF spec
        self._announce_flows_deleted(to_delete)

    def flow_modify_strict(self, flow_obj):
        raise Exception("flow_modify_strict(): Not implemented yet")

    def flow_modify(self, flow_obj):
        raise Exception("flow_modify(): Not implemented yet")

    def flow_list(self):
        """Return the (live) list of flow entries"""
        return self.flows

    def _flow_find_overlapping_flows(self, flow_mod, return_on_first_hit=False):
        """
        Return list of overlapping flow(s)
        Two flows overlap if a packet may match both and if they have the
        same priority.
        NOTE: not implemented yet — always returns None (i.e. "no
        overlap"), see flow_add().
        """

    def _flow_find(self, flow):
        """Return index of the entry matching flow per _flows_match(),
        or -1 if absent"""
        for i, f in enumerate(self.flows):
            if self._flows_match(f, flow):
                return i
        return -1

    def _flows_match(self, f1, f2):
        """Two entries are 'the same flow' if these key fields all agree"""
        keys_matter = ('table_id', 'priority', 'flags', 'cookie', 'match')
        for key in keys_matter:
            if getattr(f1, key) != getattr(f2, key):
                return False
        return True

    def _flow_matches_spec(self, flow, flow_mod):
        """
        Return True if a given flow (flow_stats_entry) is "covered" by the wildcarded flow
        mod or delete spec (as defined in the flow_mod or flow_delete message); otherwise
        return False.
        """
        assert isinstance(flow, ofp.common.flow_stats_entry)
        assert isinstance(flow_mod, (ofp.message.flow_delete, ofp.message.flow_mod))

        # Check if flow.cookie is a match for flow_mod cookie/cookie_mask
        if (flow.cookie & flow_mod.cookie_mask) != (flow_mod.cookie & flow_mod.cookie_mask):
            return False

        # Check if flow.table_id is covered by flow_mod.table_id
        if flow_mod.table_id != ofp.OFPTT_ALL and flow.table_id != flow_mod.table_id:
            return False

        # Check out_port
        if flow_mod.out_port != ofp.OFPP_ANY and not self._flow_has_out_port(flow, flow_mod.out_port):
            return False

        # Check out_group
        if flow_mod.out_group != ofp.OFPG_ANY and not self._flow_has_out_group(flow, flow_mod.out_group):
            return False

        # Priority is ignored

        # Check match condition
        # If the flow_mod match field is empty, that is a special case and indicate the flow entry matches
        match = flow_mod.match
        assert isinstance(match, ofp.common.match_v3)
        if not match.oxm_list:
            return True  # If we got this far and the match is empty in the flow spec, than the flow matches
        else:
            raise NotImplementedError("_flow_matches_spec(): No flow match analysys yet")

    def _flow_has_out_port(self, flow, out_port):
        """Return True if flow has a output command with the given out_port"""
        assert isinstance(flow, ofp.common.flow_stats_entry)
        for instruction in flow.instructions:
            assert isinstance(instruction, ofp.instruction.instruction)
            if instruction.type == ofp.OFPIT_APPLY_ACTIONS:
                assert isinstance(instruction, ofp.instruction.apply_actions)
                for action in instruction.actions:
                    assert isinstance(action, ofp.action.action)
                    if action.type == ofp.OFPAT_OUTPUT:
                        assert isinstance(action, ofp.action.output)
                        if action.port == out_port:
                            return True

        # otherwise...
        return False

    def _flow_has_out_group(self, flow, out_group):
        """Return True if flow has a output command with the given out_group"""
        assert isinstance(flow, ofp.common.flow_stats_entry)
        for instruction in flow.instructions:
            assert isinstance(instruction, ofp.instruction.instruction)
            if instruction.type == ofp.OFPIT_APPLY_ACTIONS:
                assert isinstance(instruction, ofp.instruction.apply_actions)
                for action in instruction.actions:
                    assert isinstance(action, ofp.action.action)
                    if action.type == ofp.OFPAT_GROUP:
                        assert isinstance(action, ofp.action.group)
                        if action.group_id == out_group:
                            return True

        # otherwise...
        return False

    def _flows_delete_by_group_id(self, group_id):
        """Delete any flow referring to given group id"""
        to_keep = []
        to_delete = []
        for f in self.flows:
            list_to_append = to_delete if self._flow_has_out_group(f, group_id) else to_keep
            list_to_append.append(f)

        # replace flow table with keepers
        self.flows = to_keep

        # send notifications for discarded flows as required by OF spec
        self._announce_flows_deleted(to_delete)

    def _announce_flows_deleted(self, flows):
        for f in flows:
            self._announce_flow_deleted(f)

    def _announce_flow_deleted(self, flow):
        """Send notification to controller if flow is flagged with
        OFPFF_SEND_FLOW_REM flag"""
        if flow.flags & ofp.OFPFF_SEND_FLOW_REM:
            raise NotImplementedError("_announce_flow_deleted()")

    ## <=========================== GROUP HANDLERS =================================>

    def group_add(self, group_add):
        """Add a new group; signal GROUP_EXISTS error on duplicate id"""
        assert isinstance(group_add, ofp.message.group_add)
        if group_add.group_id in self.groups:
            self.signal_group_mod_error(ofp.OFPGMFC_GROUP_EXISTS, group_add)
        else:
            group_entry = group_entry_from_group_mod_message(group_add)
            self.groups[group_add.group_id] = group_entry

    def group_modify(self, group_modify):
        """Replace an existing group; signal INVALID_GROUP if unknown"""
        assert isinstance(group_modify, ofp.message.group_modify)
        if group_modify.group_id not in self.groups:
            self.signal_group_mod_error(ofp.OFPGMFC_INVALID_GROUP, group_modify)
        else:
            # replace existing group entry with new group definition
            group_entry = group_entry_from_group_mod_message(group_modify)
            self.groups[group_modify.group_id] = group_entry

    def group_delete(self, group_delete):
        """Delete one group (or all, for OFPG_ALL) and the flows that
        reference it"""
        assert isinstance(group_delete, ofp.message.group_mod)
        if group_delete.group_id == ofp.OFPG_ALL:  # drop all groups
            # we must delete all flows that point to this group and signal
            # controller as requested by the flows' flag
            self.groups = {}
            logging.info("all groups deleted")

        else:
            if group_delete.group_id not in self.groups:
                # per the spec, this is silently ignored
                return

            else:
                self._flows_delete_by_group_id(group_delete.group_id)
                del self.groups[group_delete.group_id]
                logging.info("group %d deleted" % group_delete.group_id)

    def group_list(self):
        """Return the group descriptions of all known groups"""
        return [group_entry.group_desc for group_entry in self.groups.values()]

    def group_stats(self):
        """Return the group statistics of all known groups"""
        return [group_entry.group_stats for group_entry in self.groups.values()]

    ## <=========================== TABLE HANDLERS =================================>

    def table_stats(self):
        """Scan through flow entries and create table stats"""
        stats = {}
        for flow in self.flows:
            table_id = flow.table_id
            if table_id not in stats:
                # create lazily; setdefault() would construct (and discard)
                # a fresh entry on every iteration
                stats[table_id] = ofp.common.table_stats_entry(table_id)
            entry = stats[table_id]
            entry.active_count += 1
            entry.lookup_count += 1   # FIXME how to count these?
            entry.matched_count += 1  # FIXME how to count these?
        return sorted(stats.values(), key=lambda e: e.table_id)