This commit cleans up the python directory to ensure that the
adapters and the CLI run properly.

Change-Id: Ic68a3ecd1f16a5af44296e3c020c808b185f4c18
diff --git a/compose/adapters-ponsim.yml b/compose/adapters-ponsim.yml
index 145d99b..86a30d2 100644
--- a/compose/adapters-ponsim.yml
+++ b/compose/adapters-ponsim.yml
@@ -23,7 +23,7 @@
         max-size: "10m"
         max-file: "3"
     command: [
-      "/adapters/adapters/ponsim_olt/main.py",
+      "/voltha/python/adapters/ponsim_olt/main.py",
       "-v",
       "--name=ponsim_olt",
       "--kafka_adapter=${DOCKER_HOST_IP}:9092",
@@ -41,7 +41,7 @@
         max-size: "10m"
         max-file: "3"
     command: [
-      "/adapters/adapters/ponsim_onu/main.py",
+      "/voltha/python/adapters/ponsim_onu/main.py",
       "-v",
       "--name=ponsim_onu",
       "--kafka_adapter=${DOCKER_HOST_IP}:9092",
diff --git a/compose/cli.yml b/compose/cli.yml
new file mode 100644
index 0000000..e8ff4ee
--- /dev/null
+++ b/compose/cli.yml
@@ -0,0 +1,37 @@
+---
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+version: "2"
+services:
+  cli:
+    image: "${REGISTRY}${REPOSITORY}voltha-cli:latest"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    environment:
+      DOCKER_HOST_IP: "${DOCKER_HOST_IP}"
+    entrypoint:
+    - /voltha/python/cli/setup.sh
+    - -g 192.168.0.14:50057
+    networks:
+    - default
+    ports:
+    - "5022:22"
+
+networks:
+  default:
+    driver: bridge
diff --git a/protos/core_adapter.proto b/protos/core_adapter.proto
index c995b0d..24390f5 100644
--- a/protos/core_adapter.proto
+++ b/protos/core_adapter.proto
@@ -21,9 +21,15 @@
     bool val = 1;
 }
 
-enum ErrorCode {
+message Packet {
+    bytes payload = 1;
+}
+
+message ErrorCode {
+    enum codes {
         UNSUPPORTED_REQUEST = 0;
         INVALID_PARAMETERS = 1;
+    }
 }
 
 message Error {
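
Note on the ErrorCode change: wrapping the previously top-level enum in a
message scopes its values, so the generated bindings no longer expose them as
module-level constants. A minimal sketch of the effect on the Python side,
assuming the compiled module is importable as core_adapter_pb2 (the exact
import path is an assumption, not shown in this diff):

    from voltha.protos import core_adapter_pb2  # assumed import path

    # Before: a top-level enum put its values in the module namespace:
    #     core_adapter_pb2.UNSUPPORTED_REQUEST
    # After: the values hang off the wrapping ErrorCode message:
    code = core_adapter_pb2.ErrorCode.UNSUPPORTED_REQUEST

    # The new Packet message is a thin wrapper around raw bytes:
    pkt = core_adapter_pb2.Packet(payload=b"\x00\x01")
    assert pkt.payload == b"\x00\x01"
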
diff --git a/protos/voltha.proto b/protos/voltha.proto
index 7574dd2..e03442a 100644
--- a/protos/voltha.proto
+++ b/protos/voltha.proto
@@ -445,6 +445,23 @@
         option (voltha.yang_xml_tag).xml_tag = 'device_groups';
     }
 
+    // Stream control packets to the dataplane
+    rpc StreamPacketsOut(stream openflow_13.PacketOut)
+        returns(google.protobuf.Empty) {
+        // This does not have an HTTP representation
+    }
+
+    // Receive control packet stream
+    rpc ReceivePacketsIn(google.protobuf.Empty)
+        returns(stream openflow_13.PacketIn) {
+        // This does not have an HTTP representation
+    }
+
+    rpc ReceiveChangeEvents(google.protobuf.Empty)
+        returns(stream openflow_13.ChangeEvent) {
+        // This does not have an HTTP representation
+    }
+
     // Get additional information on a device group
     rpc GetDeviceGroup(ID) returns(DeviceGroup) {
         option (google.api.http) = {
@@ -452,20 +469,6 @@
         };
     }
 
-    // Stream control packets to the dataplane
-    rpc StreamPacketsOut (stream openflow_13.PacketOut) returns (google.protobuf.Empty) {
-        // This does not have an HTTP representation
-    }
-
-    // Receive control packet stream
-    rpc ReceivePacketsIn (google.protobuf.Empty) returns (stream openflow_13.PacketIn) {
-        // This does not have an HTTP representation
-    }
-
-    rpc ReceiveChangeEvents (google.protobuf.Empty) returns (stream openflow_13.ChangeEvent) {
-        // This does not have an HTTP representation
-    }
-
     rpc CreateAlarmFilter(AlarmFilter) returns(AlarmFilter) {
         option (google.api.http) = {
             post: "/api/v1/alarm_filters"
diff --git a/python/adapters/Makefile b/python/Makefile
similarity index 97%
rename from python/adapters/Makefile
rename to python/Makefile
index 2531985..b19472f 100644
--- a/python/adapters/Makefile
+++ b/python/Makefile
@@ -88,7 +88,7 @@
 
 FETCH_IMAGE_LIST = $(shell echo $(FETCH_BUILD_IMAGE_LIST) $(FETCH_COMPOSE_IMAGE_LIST) $(FETCH_K8S_IMAGE_LIST) | tr ' ' '\n' | sort -u)
 
-.PHONY: $(DIRS) $(DIRS_CLEAN) $(DIRS_FLAKE8) flake8 base ponsim_olt ponsim_onu protos kafka common start stop tag push pull
+.PHONY: $(DIRS) $(DIRS_CLEAN) $(DIRS_FLAKE8) flake8 base ponsim_olt ponsim_onu protos cli kafka common start stop tag push pull
 
 # This should to be the first and default target in this Makefile
 help:
@@ -154,6 +154,9 @@
 adapter_ponsim_onu:
 	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-adapter-ponsim-onu:${TAG} -f docker/Dockerfile.adapter_ponsim_onu .
 
+cli:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-cli:${TAG} -f docker/Dockerfile.cli .
+
 tag: $(patsubst  %,%.tag,$(DOCKER_IMAGE_LIST))
 
 push: tag $(patsubst  %,%.push,$(DOCKER_IMAGE_LIST))
diff --git a/python/adapters/protos/__init__.py b/python/__init__.py
similarity index 100%
copy from python/adapters/protos/__init__.py
copy to python/__init__.py
diff --git a/python/adapters/common/__init__.py b/python/adapters/common/__init__.py
index b0fb0b2..58aca1e 100644
--- a/python/adapters/common/__init__.py
+++ b/python/adapters/common/__init__.py
@@ -1,10 +1,10 @@
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-# http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/python/adapters/common/event_bus.py b/python/adapters/common/event_bus.py
deleted file mode 100644
index e717c16..0000000
--- a/python/adapters/common/event_bus.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-A simple internal pub/sub event bus with topics and filter-based registration.
-"""
-import re
-
-import structlog
-
-
-log = structlog.get_logger()
-
-
-class _Subscription(object):
-
-    __slots__ = ('bus', 'predicate', 'callback', 'topic')
-    def __init__(self, bus, predicate, callback, topic=None):
-        self.bus = bus
-        self.predicate = predicate
-        self.callback = callback
-        self.topic = topic
-
-
-class EventBus(object):
-
-    def __init__(self):
-        self.subscriptions = {}  # topic -> list of _Subscription objects
-                                 # topic None holds regexp based topic subs.
-        self.subs_topic_map = {} # to aid fast lookup when unsubscribing
-
-    def list_subscribers(self, topic=None):
-        if topic is None:
-            return sum(self.subscriptions.itervalues(), [])
-        else:
-            if topic in self.subscriptions:
-                return self.subscriptions[topic]
-            else:
-                return []
-
-    @staticmethod
-    def _get_topic_key(topic):
-        if isinstance(topic, str):
-            return topic
-        elif hasattr(topic, 'match'):
-            return None
-        else:
-            raise AttributeError('topic not a string nor a compiled regex')
-
-    def subscribe(self, topic, callback, predicate=None):
-        """
-        Subscribe to given topic with predicate and register the callback
-        :param topic: String topic (explicit) or regexp based topic filter.
-        :param callback: Callback method with signature def func(topic, msg)
-        :param predicate: Optional method/function signature def predicate(msg)
-        :return: Subscription object which can be used to unsubscribe
-        """
-        subscription = _Subscription(self, predicate, callback, topic)
-        topic_key = self._get_topic_key(topic)
-        self.subscriptions.setdefault(topic_key, []).append(subscription)
-        self.subs_topic_map[subscription] = topic_key
-        return subscription
-
-    def unsubscribe(self, subscription):
-        """
-        Remove given subscription
-        :param subscription: subscription object as was returned by subscribe
-        :return: None
-        """
-        topic_key = self.subs_topic_map[subscription]
-        self.subscriptions[topic_key].remove(subscription)
-
-    def publish(self, topic, msg):
-        """
-        Publish given message to all subscribers registered with topic taking
-        the predicate functions into account.
-        :param topic: String topic
-        :param msg: Arbitrary python data as message
-        :return: None
-        """
-        from copy import copy
-
-        def passes(msg, predicate):
-            try:
-                return predicate(msg)
-            except Exception, e:
-                return False  # failed predicate function treated as no match
-
-        # lookup subscribers with explicit topic subscriptions
-        subscribers = self.subscriptions.get(topic, [])
-
-        # add matching regexp topic subscribers
-        subscribers.extend(s for s in self.subscriptions.get(None, [])
-                           if s.topic.match(topic))
-
-        # iterate over a shallow-copy of subscribers
-        for candidate in copy(subscribers):
-            predicate = candidate.predicate
-            if predicate is None or passes(msg, predicate):
-                try:
-                    candidate.callback(topic, msg)
-                except Exception, e:
-                    log.exception('callback-failed', e=repr(e), topic=topic)
-
-
-
-default_bus = EventBus()
-
-
-class EventBusClient(object):
-    """
-    Primary interface to the EventBus. Usage:
-
-    Publish:
-    >>> events = EventBusClient()
-    >>> msg = dict(a=1, b='foo')
-    >>> events.publish('a.topic', msg)
-
-    Subscribe to get all messages on specific topic:
-    >>> def got_event(topic, msg):
-    >>>     print topic, ':', msg
-    >>> events = EventBusClient()
-    >>> events.subscribe('a.topic', got_event)
-
-    Subscribe to get messages matching predicate on specific topic:
-    >>> def got_event(topic, msg):
-    >>>     print topic, ':', msg
-    >>> events = EventBusClient()
-    >>> events.subscribe('a.topic', got_event, lambda msg: msg.len() < 100)
-
-    Use a DeferredQueue to buffer incoming messages
-    >>> queue = DeferredQueue()
-    >>> events = EventBusClient()
-    >>> events.subscribe('a.topic', lambda _, msg: queue.put(msg))
-
-    """
-    def __init__(self, bus=None):
-        """
-        Obtain a client interface for the pub/sub event bus.
-        :param bus: An optional specific event bus. Intended mainly for test
-        use. If not provided, the process default bus will be used, which is
-        the preferred use (a process shall not need more than one bus).
-        """
-        self.bus = bus or default_bus
-
-    def publish(self, topic, msg):
-        """
-        Publish given msg to given topic.
-        :param topic: String topic
-        :param msg: Arbitrary python data as message
-        :return: None
-        """
-        self.bus.publish(topic, msg)
-
-    def subscribe(self, topic, callback, predicate=None):
-        """
-        Subscribe to given topic with predicate and register the callback
-        :param topic: String topic (explicit) or regexp based topic filter.
-        :param callback: Callback method with signature def func(topic, msg)
-        :param predicate: Optional method/function with signature
-        def predicate(msg)
-        :return: Subscription object which can be used to unsubscribe
-        """
-        return self.bus.subscribe(topic, callback, predicate)
-
-    def unsubscribe(self, subscription):
-        """
-        Remove given subscription
-        :param subscription: subscription object as was returned by subscribe
-        :return: None
-        """
-        return self.bus.unsubscribe(subscription)
-
-    def list_subscribers(self, topic=None):
-        """
-        Return list of subscribers. If topic is provided, it is filtered for
-        those subscribing to the topic.
-        :param topic: Optional topic
-        :return: List of subscriptions
-        """
-        return self.bus.list_subscribers(topic)
diff --git a/python/adapters/common/frameio/frameio.py b/python/adapters/common/frameio/frameio.py
index 2f68ef8..0657257 100644
--- a/python/adapters/common/frameio/frameio.py
+++ b/python/adapters/common/frameio/frameio.py
@@ -40,10 +40,10 @@
 from twisted.internet import reactor
 from zope.interface import implementer
 
-from adapters.common.utils.registry import IComponent
+from python.common.utils.registry import IComponent
 
 if sys.platform.startswith('linux'):
-    from adapters.common.frameio.third_party.oftest import afpacket, netutils
+    from third_party.oftest import afpacket, netutils
 elif sys.platform == 'darwin':
     from scapy.arch import pcapdnet, BIOCIMMEDIATE, dnet
 
diff --git a/python/common/kvstore/__init__.py b/python/adapters/common/kvstore/__init__.py
similarity index 100%
rename from python/common/kvstore/__init__.py
rename to python/adapters/common/kvstore/__init__.py
diff --git a/python/common/kvstore/consul_client.py b/python/adapters/common/kvstore/consul_client.py
similarity index 97%
rename from python/common/kvstore/consul_client.py
rename to python/adapters/common/kvstore/consul_client.py
index bc14759..789e797 100644
--- a/python/common/kvstore/consul_client.py
+++ b/python/adapters/common/kvstore/consul_client.py
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.kvstore.kv_client import DEFAULT_TIMEOUT, Event, KVClient, KVPair, RETRY_BACKOFF
-from common.utils.asleep import asleep
-from common.utils.deferred_utils import DeferredWithTimeout, TimeOutError
+from kv_client import DEFAULT_TIMEOUT, Event, KVClient, KVPair, RETRY_BACKOFF
+from python.common.utils.asleep import asleep
+from python.common.utils.deferred_utils import DeferredWithTimeout, TimeOutError
 from consul import ConsulException
 from consul.twisted import Consul
 from structlog import get_logger
diff --git a/python/common/kvstore/etcd_client.py b/python/adapters/common/kvstore/etcd_client.py
similarity index 98%
rename from python/common/kvstore/etcd_client.py
rename to python/adapters/common/kvstore/etcd_client.py
index a958b71..e1850e7 100644
--- a/python/common/kvstore/etcd_client.py
+++ b/python/adapters/common/kvstore/etcd_client.py
@@ -23,7 +23,7 @@
 #
 ################################################################################
 
-from common.kvstore.kv_client import DEFAULT_TIMEOUT, Event, KVClient, KVPair
+from kv_client import DEFAULT_TIMEOUT, Event, KVClient, KVPair
 from structlog import get_logger
 from twisted.internet import reactor
 from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
diff --git a/python/common/kvstore/kv_client.py b/python/adapters/common/kvstore/kv_client.py
similarity index 99%
rename from python/common/kvstore/kv_client.py
rename to python/adapters/common/kvstore/kv_client.py
index 69a6480..f6486f3 100644
--- a/python/common/kvstore/kv_client.py
+++ b/python/adapters/common/kvstore/kv_client.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.utils.asleep import asleep
+from python.common.utils.asleep import asleep
 from structlog import get_logger
 from twisted.internet.defer import inlineCallbacks, returnValue
 
diff --git a/python/common/kvstore/kvstore.py b/python/adapters/common/kvstore/kvstore.py
similarity index 91%
rename from python/common/kvstore/kvstore.py
rename to python/adapters/common/kvstore/kvstore.py
index 662b34d..ed7f246 100644
--- a/python/common/kvstore/kvstore.py
+++ b/python/adapters/common/kvstore/kvstore.py
@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from common.kvstore.consul_client import ConsulClient
-from common.kvstore.etcd_client import EtcdClient
+from consul_client import ConsulClient
+from etcd_client import EtcdClient
 
 def create_kv_client(kv_store, host, port):
     '''
diff --git a/python/adapters/common/manhole.py b/python/adapters/common/manhole.py
deleted file mode 100644
index c00c900..0000000
--- a/python/adapters/common/manhole.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import rlcompleter
-from pprint import pprint
-
-import structlog
-from twisted.conch import manhole_ssh
-from twisted.conch.manhole import ColoredManhole
-from twisted.conch.ssh import keys
-from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
-from twisted.cred.portal import Portal
-from twisted.internet import reactor
-
-log = structlog.get_logger()
-
-
-MANHOLE_SERVER_RSA_PRIVATE = './manhole_rsa_key'
-MANHOLE_SERVER_RSA_PUBLIC = './manhole_rsa_key.pub'
-
-
-def get_rsa_keys():
-    if not (os.path.exists(MANHOLE_SERVER_RSA_PUBLIC) and \
-                    os.path.exists(MANHOLE_SERVER_RSA_PRIVATE)):
-        # generate a RSA keypair
-        log.info('generate-rsa-keypair')
-        from Crypto.PublicKey import RSA
-        rsa_key = RSA.generate(1024)
-        public_key_str = rsa_key.publickey().exportKey(format='OpenSSH')
-        private_key_str = rsa_key.exportKey()
-
-        # save keys for next time
-        file(MANHOLE_SERVER_RSA_PUBLIC, 'w+b').write(public_key_str)
-        file(MANHOLE_SERVER_RSA_PRIVATE, 'w+b').write(private_key_str)
-        log.debug('saved-rsa-keypair', public=MANHOLE_SERVER_RSA_PUBLIC,
-                  private=MANHOLE_SERVER_RSA_PRIVATE)
-    else:
-        public_key_str = file(MANHOLE_SERVER_RSA_PUBLIC).read()
-        private_key_str = file(MANHOLE_SERVER_RSA_PRIVATE).read()
-    return public_key_str, private_key_str
-
-
-class ManholeWithCompleter(ColoredManhole):
-
-    def __init__(self, namespace):
-        namespace['manhole'] = self
-        super(ManholeWithCompleter, self).__init__(namespace)
-        self.last_tab = None
-        self.completer = rlcompleter.Completer(self.namespace)
-
-    def handle_TAB(self):
-        if self.last_tab != self.lineBuffer:
-            self.last_tab = self.lineBuffer
-            return
-
-        buffer = ''.join(self.lineBuffer)
-        completions = []
-        maxlen = 3
-        for c in xrange(1000):
-            candidate = self.completer.complete(buffer, c)
-            if not candidate:
-                break
-
-            if len(candidate) > maxlen:
-                maxlen = len(candidate)
-
-            completions.append(candidate)
-
-        if len(completions) == 1:
-            rest = completions[0][len(buffer):]
-            self.terminal.write(rest)
-            self.lineBufferIndex += len(rest)
-            self.lineBuffer.extend(rest)
-
-        elif len(completions):
-            maxlen += 3
-            numcols = self.width / maxlen
-            self.terminal.nextLine()
-            for idx, candidate in enumerate(completions):
-                self.terminal.write('%%-%ss' % maxlen % candidate)
-                if not ((idx + 1) % numcols):
-                    self.terminal.nextLine()
-            self.terminal.nextLine()
-            self.drawInputLine()
-
-
-class Manhole(object):
-
-    def __init__(self, port, pws, **kw):
-        kw.update(globals())
-        kw['pp'] = pprint
-
-        realm = manhole_ssh.TerminalRealm()
-        manhole = ManholeWithCompleter(kw)
-
-        def windowChanged(_, win_size):
-            manhole.terminalSize(*reversed(win_size[:2]))
-
-        realm.sessionFactory.windowChanged = windowChanged
-        realm.chainedProtocolFactory.protocolFactory = lambda _: manhole
-        portal = Portal(realm)
-        portal.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(**pws))
-        factory = manhole_ssh.ConchFactory(portal)
-        public_key_str, private_key_str = get_rsa_keys()
-        factory.publicKeys = {
-            'ssh-rsa': keys.Key.fromString(public_key_str)
-        }
-        factory.privateKeys = {
-            'ssh-rsa': keys.Key.fromString(private_key_str)
-        }
-        reactor.listenTCP(port, factory, interface='localhost')
-
-
-if __name__ == '__main__':
-    Manhole(12222, dict(admin='admin'))
-    reactor.run()
diff --git a/python/common/pon_resource_manager/__init__.py b/python/adapters/common/pon_resource_manager/__init__.py
similarity index 100%
rename from python/common/pon_resource_manager/__init__.py
rename to python/adapters/common/pon_resource_manager/__init__.py
diff --git a/python/common/pon_resource_manager/resource_kv_store.py b/python/adapters/common/pon_resource_manager/resource_kv_store.py
similarity index 100%
rename from python/common/pon_resource_manager/resource_kv_store.py
rename to python/adapters/common/pon_resource_manager/resource_kv_store.py
diff --git a/python/common/pon_resource_manager/resource_manager.py b/python/adapters/common/pon_resource_manager/resource_manager.py
similarity index 100%
rename from python/common/pon_resource_manager/resource_manager.py
rename to python/adapters/common/pon_resource_manager/resource_manager.py
diff --git a/python/adapters/common/structlog_setup.py b/python/adapters/common/structlog_setup.py
deleted file mode 100644
index 3401977..0000000
--- a/python/adapters/common/structlog_setup.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""Setting up proper logging for Voltha"""
-
-import logging
-import logging.config
-from collections import OrderedDict
-
-import structlog
-from structlog.stdlib import BoundLogger, INFO
-
-try:
-    from thread import get_ident as _get_ident
-except ImportError:
-    from dummy_thread import get_ident as _get_ident
-
-
-class StructuredLogRenderer(object):
-    def __call__(self, logger, name, event_dict):
-        # in order to keep structured log data in event_dict to be forwarded as
-        # is, we need to pass it into the logger framework as the first
-        # positional argument.
-        args = (event_dict,)
-        kwargs = {}
-        return args, kwargs
-
-
-class PlainRenderedOrderedDict(OrderedDict):
-    """Our special version of OrderedDict that renders into string as a dict,
-       to make the log stream output cleaner.
-    """
-    def __repr__(self, _repr_running={}):
-        'od.__repr__() <==> repr(od)'
-        call_key = id(self), _get_ident()
-        if call_key in _repr_running:
-            return '...'
-        _repr_running[call_key] = 1
-        try:
-            if not self:
-                return '{}'
-            return '{%s}' % ", ".join("%s: %s" % (k, v)
-                                      for k, v in self.items())
-        finally:
-            del _repr_running[call_key]
-
-
-def setup_logging(log_config, instance_id, verbosity_adjust=0):
-    """
-    Set up logging such that:
-    - The primary logging entry method is structlog
-      (see http://structlog.readthedocs.io/en/stable/index.html)
-    - By default, the logging backend is Python standard lib logger
-    """
-
-    def add_exc_info_flag_for_exception(_, name, event_dict):
-        if name == 'exception':
-            event_dict['exc_info'] = True
-        return event_dict
-
-    def add_instance_id(_, __, event_dict):
-        event_dict['instance_id'] = instance_id
-        return event_dict
-
-    # Configure standard logging
-    logging.config.dictConfig(log_config)
-    logging.root.level -= 10 * verbosity_adjust
-
-    processors = [
-        add_exc_info_flag_for_exception,
-        structlog.processors.StackInfoRenderer(),
-        structlog.processors.format_exc_info,
-        add_instance_id,
-        StructuredLogRenderer(),
-    ]
-    structlog.configure(logger_factory=structlog.stdlib.LoggerFactory(),
-                        context_class=PlainRenderedOrderedDict,
-                        wrapper_class=BoundLogger,
-                        processors=processors)
-
-    # Mark first line of log
-    log = structlog.get_logger()
-    log.info("first-line")
-    return log
-
-
-def update_logging(instance_id, vcore_id):
-    """
-    Add the vcore id to the structured logger
-    :param vcore_id:  The assigned vcore id
-    :return: structure logger
-    """
-    def add_exc_info_flag_for_exception(_, name, event_dict):
-        if name == 'exception':
-            event_dict['exc_info'] = True
-        return event_dict
-
-    def add_instance_id(_, __, event_dict):
-        if instance_id is not None:
-            event_dict['instance_id'] = instance_id
-        return event_dict
-
-    def add_vcore_id(_, __, event_dict):
-        if vcore_id is not None:
-            event_dict['vcore_id'] = vcore_id
-        return event_dict
-
-    processors = [
-        add_exc_info_flag_for_exception,
-        structlog.processors.StackInfoRenderer(),
-        structlog.processors.format_exc_info,
-        add_instance_id,
-        add_vcore_id,
-        StructuredLogRenderer(),
-    ]
-    structlog.configure(processors=processors)
-
-    # Mark first line of log
-    log = structlog.get_logger()
-    log.info("updated-logger")
-    return log
diff --git a/python/adapters/common/utils/__init__.py b/python/adapters/common/utils/__init__.py
deleted file mode 100644
index b0fb0b2..0000000
--- a/python/adapters/common/utils/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/python/adapters/common/utils/asleep.py b/python/adapters/common/utils/asleep.py
deleted file mode 100644
index 10d1ce3..0000000
--- a/python/adapters/common/utils/asleep.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-""" Async sleep (asleep) method and other twisted goodies """
-
-from twisted.internet import reactor
-from twisted.internet.defer import Deferred
-
-
-def asleep(dt):
-    """
-    Async (event driven) wait for given time period (in seconds)
-    :param dt: Delay in seconds
-    :return: Deferred to be fired with value None when time expires.
-    """
-    d = Deferred()
-    reactor.callLater(dt, lambda: d.callback(None))
-    return d
diff --git a/python/adapters/common/utils/consulhelpers.py b/python/adapters/common/utils/consulhelpers.py
deleted file mode 100644
index 6060ba3..0000000
--- a/python/adapters/common/utils/consulhelpers.py
+++ /dev/null
@@ -1,178 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Some consul related convenience functions
-"""
-
-from structlog import get_logger
-from consul import Consul
-from random import randint
-from adapters.common.utils.nethelpers import get_my_primary_local_ipv4
-
-log = get_logger()
-
-
-def connect_to_consult(consul_endpoint):
-    log.debug('getting-service-endpoint', consul=consul_endpoint)
-
-    host = consul_endpoint.split(':')[0].strip()
-    port = int(consul_endpoint.split(':')[1].strip())
-
-    return Consul(host=host, port=port)
-
-
-def verify_all_services_healthy(consul_endpoint, service_name=None,
-                                number_of_expected_services=None):
-    """
-    Verify in consul if any service is healthy
-    :param consul_endpoint: a <host>:<port> string
-    :param service_name: name of service to check, optional
-    :param number_of_expected_services number of services to check for, optional
-    :return: true if healthy, false otherwise
-    """
-
-    def check_health(service):
-        _, serv_health = consul.health.service(service, passing=True)
-        return not serv_health == []
-
-    consul = connect_to_consult(consul_endpoint)
-
-    if service_name is not None:
-        return check_health(service_name)
-
-    services = get_all_services(consul_endpoint)
-
-    items = services.keys()
-
-    if number_of_expected_services is not None and \
-                    len(items) != number_of_expected_services:
-        return False
-
-    for item in items:
-        if not check_health(item):
-            return False
-
-    return True
-
-
-def get_all_services(consul_endpoint):
-    log.debug('getting-service-verify-health')
-
-    consul = connect_to_consult(consul_endpoint)
-    _, services = consul.catalog.services()
-
-    return services
-
-
-def get_all_instances_of_service(consul_endpoint, service_name):
-    log.debug('getting-all-instances-of-service', service=service_name)
-
-    consul = connect_to_consult(consul_endpoint)
-    _, services = consul.catalog.service(service_name)
-
-    for service in services:
-        log.debug('service',
-                  name=service['ServiceName'],
-                  serviceid=service['ServiceID'],
-                  serviceport=service['ServicePort'],
-                  createindex=service['CreateIndex'])
-
-    return services
-
-
-def get_endpoint_from_consul(consul_endpoint, service_name):
-    """
-    Get endpoint of service_name from consul.
-    :param consul_endpoint: a <host>:<port> string
-    :param service_name: name of service for which endpoint
-                         needs to be found.
-    :return: service endpoint if available, else exit.
-    """
-    log.debug('getting-service-info', service=service_name)
-
-    consul = connect_to_consult(consul_endpoint)
-    _, services = consul.catalog.service(service_name)
-
-    if len(services) == 0:
-        raise Exception(
-            'Cannot find service {} in consul'.format(service_name))
-        os.exit(1)
-
-    """ Get host IPV4 address
-    """
-    local_ipv4 = get_my_primary_local_ipv4()
-    """ If host IP address from where the request came in matches
-        the IP address of the requested service's host IP address,
-        pick the endpoint
-    """
-    for i in range(len(services)):
-        service = services[i]
-        if service['ServiceAddress'] == local_ipv4:
-            log.debug("picking address locally")
-            endpoint = '{}:{}'.format(service['ServiceAddress'],
-                                      service['ServicePort'])
-            return endpoint
-
-    """ If service is not available locally, picak a random
-        endpoint for the service from the list
-    """
-    service = services[randint(0, len(services) - 1)]
-    endpoint = '{}:{}'.format(service['ServiceAddress'],
-                              service['ServicePort'])
-
-    return endpoint
-
-
-def get_healthy_instances(consul_endpoint, service_name=None,
-                          number_of_expected_services=None):
-    """
-    Verify in consul if any service is healthy
-    :param consul_endpoint: a <host>:<port> string
-    :param service_name: name of service to check, optional
-    :param number_of_expected_services number of services to check for, optional
-    :return: true if healthy, false otherwise
-    """
-
-    def check_health(service):
-        _, serv_health = consul.health.service(service, passing=True)
-        return not serv_health == []
-
-    consul = connect_to_consult(consul_endpoint)
-
-    if service_name is not None:
-        return check_health(service_name)
-
-    services = get_all_services(consul_endpoint)
-
-    items = services.keys()
-
-    if number_of_expected_services is not None and \
-                    len(items) != number_of_expected_services:
-        return False
-
-    for item in items:
-        if not check_health(item):
-            return False
-
-    return True
-
-
-if __name__ == '__main__':
-    # print get_endpoint_from_consul('10.100.198.220:8500', 'kafka')
-    # print get_healthy_instances('10.100.198.220:8500', 'voltha-health')
-    # print get_healthy_instances('10.100.198.220:8500')
-    get_all_instances_of_service('10.100.198.220:8500', 'voltha-grpc')
diff --git a/python/adapters/common/utils/deferred_utils.py b/python/adapters/common/utils/deferred_utils.py
deleted file mode 100644
index 3c55c1a..0000000
--- a/python/adapters/common/utils/deferred_utils.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from twisted.internet import reactor
-from twisted.internet.defer import Deferred
-from twisted.internet.error import AlreadyCalled
-
-
-class TimeOutError(Exception): pass
-
-
-class DeferredWithTimeout(Deferred):
-    """
-    Deferred with a timeout. If neither the callback nor the errback method
-    is called within the given time, the deferred's errback will be called
-    with a TimeOutError() exception.
-
-    All other uses are the same as of Deferred().
-    """
-    def __init__(self, timeout=1.0):
-        Deferred.__init__(self)
-        self._timeout = timeout
-        self.timer = reactor.callLater(timeout, self.timed_out)
-
-    def timed_out(self):
-        self.errback(
-            TimeOutError('timed out after {} seconds'.format(self._timeout)))
-
-    def callback(self, result):
-        self._cancel_timer()
-        return Deferred.callback(self, result)
-
-    def errback(self, fail):
-        self._cancel_timer()
-        return Deferred.errback(self, fail)
-
-    def cancel(self):
-        self._cancel_timer()
-        return Deferred.cancel(self)
-
-    def _cancel_timer(self):
-        try:
-            self.timer.cancel()
-        except AlreadyCalled:
-            pass
-
diff --git a/python/adapters/common/utils/dockerhelpers.py b/python/adapters/common/utils/dockerhelpers.py
deleted file mode 100644
index 4620aef..0000000
--- a/python/adapters/common/utils/dockerhelpers.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Some docker related convenience functions
-"""
-from datetime import datetime
-from concurrent.futures import ThreadPoolExecutor
-
-import os
-import socket
-from structlog import get_logger
-
-from docker import Client, errors
-
-
-docker_socket = os.environ.get('DOCKER_SOCK', 'unix://tmp/docker.sock')
-log = get_logger()
-
-def get_my_containers_name():
-    """
-    Return the docker containers name in which this process is running.
-    To look up the container name, we use the container ID extracted from the
-    $HOSTNAME environment variable (which is set by docker conventions).
-    :return: String with the docker container name (or None if any issue is
-             encountered)
-    """
-    my_container_id = os.environ.get('HOSTNAME', None)
-
-    try:
-        docker_cli = Client(base_url=docker_socket)
-        info = docker_cli.inspect_container(my_container_id)
-
-    except Exception, e:
-        log.exception('failed', my_container_id=my_container_id, e=e)
-        raise
-
-    name = info['Name'].lstrip('/')
-
-    return name
-
-def get_all_running_containers():
-    try:
-        docker_cli = Client(base_url=docker_socket)
-        containers = docker_cli.containers()
-
-    except Exception, e:
-        log.exception('failed', e=e)
-        raise
-
-    return containers
-
-def inspect_container(id):
-    try:
-        docker_cli = Client(base_url=docker_socket)
-        info = docker_cli.inspect_container(id)
-    except Exception, e:
-        log.exception('failed-inspect-container', id=id, e=e)
-        raise
-
-    return info
-
diff --git a/python/adapters/common/utils/grpc_utils.py b/python/adapters/common/utils/grpc_utils.py
deleted file mode 100644
index 8df630e..0000000
--- a/python/adapters/common/utils/grpc_utils.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Utilities to handle gRPC server and client side code in a Twisted environment
-"""
-import structlog
-from concurrent.futures import Future
-from twisted.internet import reactor
-from twisted.internet.defer import Deferred
-from twisted.python.threadable import isInIOThread
-
-
-log = structlog.get_logger()
-
-
-def twisted_async(func):
-    """
-    This decorator can be used to implement a gRPC method on the twisted
-    thread, allowing asynchronous programming in Twisted while serving
-    a gRPC call.
-
-    gRPC methods normally are called on the futures.ThreadPool threads,
-    so these methods cannot directly use Twisted protocol constructs.
-    If the implementation of the methods needs to touch Twisted, it is
-    safer (or mandatory) to wrap the method with this decorator, which will
-    call the inner method from the external thread and ensure that the
-    result is passed back to the foreign thread.
-
-    Example usage:
-
-    When implementing a gRPC server, typical pattern is:
-
-    class SpamService(SpamServicer):
-
-        def GetBadSpam(self, request, context):
-            '''this is called from a ThreadPoolExecutor thread'''
-            # generally unsafe to make Twisted calls
-
-        @twisted_async
-        def GetSpamSafely(self, request, context):
-            '''this method now is executed on the Twisted main thread
-            # safe to call any Twisted protocol functions
-
-        @twisted_async
-        @inlineCallbacks
-        def GetAsyncSpam(self, request, context):
-            '''this generator can use inlineCallbacks Twisted style'''
-            result = yield some_async_twisted_call(request)
-            returnValue(result)
-
-    """
-    def in_thread_wrapper(*args, **kw):
-
-        if isInIOThread():
-
-            return func(*args, **kw)
-
-        f = Future()
-
-        def twisted_wrapper():
-            try:
-                d = func(*args, **kw)
-                if isinstance(d, Deferred):
-
-                    def _done(result):
-                        f.set_result(result)
-                        f.done()
-
-                    def _error(e):
-                        f.set_exception(e)
-                        f.done()
-
-                    d.addCallback(_done)
-                    d.addErrback(_error)
-
-                else:
-                    f.set_result(d)
-                    f.done()
-
-            except Exception, e:
-                f.set_exception(e)
-                f.done()
-
-        reactor.callFromThread(twisted_wrapper)
-        try:
-            result = f.result()
-        except Exception, e:
-            log.exception(e=e, func=func, args=args, kw=kw)
-            raise
-
-        return result
-
-    return in_thread_wrapper
-
-
diff --git a/python/adapters/common/utils/id_generation.py b/python/adapters/common/utils/id_generation.py
deleted file mode 100644
index e0fea1c..0000000
--- a/python/adapters/common/utils/id_generation.py
+++ /dev/null
@@ -1,116 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# """ ID generation utils """
-
-from uuid import uuid4
-
-
-BROADCAST_CORE_ID=hex(0xFFFF)[2:]
-
-def get_next_core_id(current_id_in_hex_str):
-    """
-    :param current_id_in_hex_str: a hex string of the maximum core id 
-    assigned without the leading 0x characters
-    :return: current_id_in_hex_str + 1 in hex string 
-    """
-    if not current_id_in_hex_str or current_id_in_hex_str == '':
-        return '0001'
-    else:
-        return format(int(current_id_in_hex_str, 16) + 1, '04x')
-
-
-def create_cluster_logical_device_ids(core_id, switch_id):
-    """
-    Creates a logical device id and an OpenFlow datapath id that is unique 
-    across the Voltha cluster.
-    The returned logical device id  represents a 64 bits integer where the
-    lower 48 bits is the switch id and the upper 16 bits is the core id.   For
-    the datapath id the core id is set to '0000' as it is not used for voltha
-    core routing
-    :param core_id: string
-    :param switch_id:int
-    :return: cluster logical device id and OpenFlow datapath id
-    """
-    switch_id = format(switch_id, '012x')
-    core_in_hex=format(int(core_id, 16), '04x')
-    ld_id = '{}{}'.format(core_in_hex[-4:], switch_id[-12:])
-    dpid_id = '{}{}'.format('0000', switch_id[-12:])
-    return ld_id, int(dpid_id, 16)
-
-def is_broadcast_core_id(id):
-    assert id and len(id) == 16
-    return id[:4] == BROADCAST_CORE_ID
-
-def create_empty_broadcast_id():
-    """
-    Returns an empty broadcast id (ffff000000000000). The id is used to
-    dispatch xPON objects across all the Voltha instances.
-    :return: An empty broadcast id
-    """
-    return '{}{}'.format(BROADCAST_CORE_ID, '0'*12)
-
-def create_cluster_id():
-    """
-    Returns an id that is common across all voltha instances.  The id  
-    is a str of 64 bits.  The lower 48 bits refer to an id specific to that
-    object while the upper 16 bits refer to a broadcast core_id
-    :return: A common id across all Voltha instances
-    """
-    return '{}{}'.format(BROADCAST_CORE_ID, uuid4().hex[:12])
-
-def create_cluster_device_id(core_id):
-    """
-    Creates a device id that is unique across the Voltha cluster.
-    The device id is a str of 64 bits.  The lower 48 bits refers to the 
-    device id while the upper 16 bits refers to the core id.
-    :param core_id: string
-    :return: cluster device id
-    """
-    return '{}{}'.format(format(int(core_id), '04x'), uuid4().hex[:12])
-
-
-def get_core_id_from_device_id(device_id):
-    # Device id is a string and the first 4 characters represent the core_id
-    assert device_id and len(device_id) == 16
-    # Get the leading 4 hexs and remove leading 0's
-    return device_id[:4]
-
-
-def get_core_id_from_logical_device_id(logical_device_id):
-    """ 
-    Logical Device id is a string and the first 4 characters represent the 
-    core_id
-    :param logical_device_id: 
-    :return: core_id string
-    """
-    assert logical_device_id and len(logical_device_id) == 16
-    # Get the leading 4 hexs and remove leading 0's
-    return logical_device_id[:4]
-
-
-def get_core_id_from_datapath_id(datapath_id):
-    """
-    datapath id is a uint64 where:
-        - low 48 bits -> switch_id
-        - high 16 bits -> core id
-    :param datapath_id: 
-    :return: core_id string
-    """
-    assert datapath_id
-    # Get the hex string and remove the '0x' prefix
-    id_in_hex_str = hex(datapath_id)[2:]
-    assert len(id_in_hex_str) > 12
-    return id_in_hex_str[:-12]
diff --git a/python/adapters/common/utils/indexpool.py b/python/adapters/common/utils/indexpool.py
deleted file mode 100644
index 858cb3a..0000000
--- a/python/adapters/common/utils/indexpool.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from bitstring import BitArray
-import structlog
-
-log = structlog.get_logger()
-
-class IndexPool(object):
-    def __init__(self, max_entries, offset):
-        self.max_entries = max_entries
-        self.offset = offset
-        self.indices = BitArray(self.max_entries)
-
-    def get_next(self):
-        try:
-            _pos = self.indices.find('0b0')
-            self.indices.set(1, _pos)
-            return self.offset + _pos[0]
-        except IndexError:
-            log.info("exception-fail-to-allocate-id-all-bits-in-use")
-            return None
-
-    def allocate(self, index):
-        try:
-            _pos = index - self.offset
-            if not (0 <= _pos < self.max_entries):
-                log.info("{}-out-of-range".format(index))
-                return None
-            if self.indices[_pos]:
-                log.info("{}-is-already-allocated".format(index))
-                return None
-            self.indices.set(1, _pos)
-            return index
-
-        except IndexError:
-            return None
-
-    def release(self, index):
-        index -= self.offset
-        _pos = (index,)
-        try:
-            self.indices.set(0, _pos)
-        except IndexError:
-            log.info("bit-position-{}-out-of-range".format(index))
-
-    #index or multiple indices to set all of them to 1 - need to be a tuple
-    def pre_allocate(self, index):
-        if(isinstance(index, tuple)):
-            _lst = list(index)
-            for i in range(len(_lst)):
-                _lst[i] -= self.offset
-            index = tuple(_lst)
-            self.indices.set(1, index)
diff --git a/python/adapters/common/utils/json_format.py b/python/adapters/common/utils/json_format.py
deleted file mode 100644
index c18d013..0000000
--- a/python/adapters/common/utils/json_format.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Monkey patched json_format to allow best effort decoding of Any fields.
-Use the additional flag (strict_any_handling=False) to trigger the
-best-effort behavior. Omit the flag, or just use the original json_format
-module for the strict behavior.
-"""
-
-from google.protobuf import json_format
-
-class _PatchedPrinter(json_format._Printer):
-
-    def __init__(self, including_default_value_fields=False,
-                 preserving_proto_field_name=False,
-                 strict_any_handling=False):
-        super(_PatchedPrinter, self).__init__(including_default_value_fields,
-                                              preserving_proto_field_name)
-        self.strict_any_handling = strict_any_handling
-
-    def _BestEffortAnyMessageToJsonObject(self, msg):
-        try:
-            res = self._AnyMessageToJsonObject(msg)
-        except TypeError:
-            res = self._RegularMessageToJsonObject(msg, {})
-        return res
-
-
-def MessageToDict(message,
-                  including_default_value_fields=False,
-                  preserving_proto_field_name=False,
-                  strict_any_handling=False):
-    """Converts protobuf message to a JSON dictionary.
-
-    Args:
-      message: The protocol buffers message instance to serialize.
-      including_default_value_fields: If True, singular primitive fields,
-          repeated fields, and map fields will always be serialized.  If
-          False, only serialize non-empty fields.  Singular message fields
-          and oneof fields are not affected by this option.
-      preserving_proto_field_name: If True, use the original proto field
-          names as defined in the .proto file. If False, convert the field
-          names to lowerCamelCase.
-      strict_any_handling: If True, conversion will error out (like in the
-          original method) if an Any field with value for which the Any type
-          is not loaded is encountered. If False, the conversion will leave
-          the field un-packed, but otherwise will continue.
-
-    Returns:
-      A dict representation of the JSON formatted protocol buffer message.
-    """
-    printer = _PatchedPrinter(including_default_value_fields,
-                              preserving_proto_field_name,
-                              strict_any_handling=strict_any_handling)
-    # pylint: disable=protected-access
-    return printer._MessageToJsonObject(message)
-
-
-def MessageToJson(message,
-                  including_default_value_fields=False,
-                  preserving_proto_field_name=False,
-                  strict_any_handling=False):
-  """Converts protobuf message to JSON format.
-
-  Args:
-    message: The protocol buffers message instance to serialize.
-    including_default_value_fields: If True, singular primitive fields,
-        repeated fields, and map fields will always be serialized.  If
-        False, only serialize non-empty fields.  Singular message fields
-        and oneof fields are not affected by this option.
-    preserving_proto_field_name: If True, use the original proto field
-        names as defined in the .proto file. If False, convert the field
-        names to lowerCamelCase.
-    strict_any_handling: If True, conversion will error out (like in the
-        original method) if an Any field with value for which the Any type
-        is not loaded is encountered. If False, the conversion will leave
-        the field un-packed, but otherwise will continue.
-
-  Returns:
-    A string containing the JSON formatted protocol buffer message.
-  """
-  printer = _PatchedPrinter(including_default_value_fields,
-                            preserving_proto_field_name,
-                            strict_any_handling=strict_any_handling)
-  return printer.ToJsonString(message)
-
-
-json_format._WKTJSONMETHODS['google.protobuf.Any'] = [
-    '_BestEffortAnyMessageToJsonObject',
-    '_ConvertAnyMessage'
-]
-
-json_format._Printer._BestEffortAnyMessageToJsonObject = \
-    json_format._Printer._AnyMessageToJsonObject
diff --git a/python/adapters/common/utils/message_queue.py b/python/adapters/common/utils/message_queue.py
deleted file mode 100644
index 2b4257a..0000000
--- a/python/adapters/common/utils/message_queue.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from twisted.internet.defer import Deferred
-from twisted.internet.defer import succeed
-
-
-class MessageQueue(object):
-    """
-    An event driven queue, similar to twisted.internet.defer.DeferredQueue
-    but which allows selective dequeing based on a predicate function.
-    Unlike DeferredQueue, there is no limit on backlog, and there is no queue
-    limit.
-    """
-
-    def __init__(self):
-        self.waiting = []  # tuples of (d, predicate)
-        self.queue = []  # messages piling up here if no one is waiting
-
-    def reset(self):
-        """
-        Purge all content as well as waiters (by errback-ing their entries).
-        :return: None
-        """
-        for d, _ in self.waiting:
-            d.errback(Exception('message queue reset() was called'))
-        self.waiting = []
-        self.queue = []
-
-    def _cancelGet(self, d):
-        """
-        Remove a deferred from our waiting list.
-        :param d: The deferred that has been canceled.
-        :return: None
-        """
-        for i in range(len(self.waiting)):
-            if self.waiting[i][0] is d:
-                self.waiting.pop(i)
-
-    def put(self, obj):
-        """
-        Add an object to this queue
-        :param obj: arbitrary object that will be added to the queue
-        :return:
-        """
-
-        # if someone is waiting for this, return right away
-        for i in range(len(self.waiting)):
-            d, predicate = self.waiting[i]
-            if predicate is None or predicate(obj):
-                self.waiting.pop(i)
-                d.callback(obj)
-                return
-
-        # otherwise...
-        self.queue.append(obj)
-
-    def get(self, predicate=None):
-        """
-        Attempt to retrieve and remove an object from the queue that
-        matches the optional predicate.
-        :return: Deferred which fires with the next object available.
-        If predicate was provided, only objects for which
-        predicate(obj) is True will be considered.
-        """
-        for i in range(len(self.queue)):
-            msg = self.queue[i]
-            if predicate is None or predicate(msg):
-                self.queue.pop(i)
-                return succeed(msg)
-
-        # there were no matching entries if we got here, so we wait
-        d = Deferred(canceller=self._cancelGet)
-        self.waiting.append((d, predicate))
-        return d
-
-
diff --git a/python/adapters/common/utils/nethelpers.py b/python/adapters/common/utils/nethelpers.py
deleted file mode 100644
index b17aced..0000000
--- a/python/adapters/common/utils/nethelpers.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Some network related convenience functions
-"""
-
-from netifaces import AF_INET
-
-import netifaces as ni
-import netaddr
-
-
-def _get_all_interfaces():
-    m_interfaces = []
-    for iface in ni.interfaces():
-        m_interfaces.append((iface, ni.ifaddresses(iface)))
-    return m_interfaces
-
-
-def _get_my_primary_interface():
-    gateways = ni.gateways()
-    assert 'default' in gateways, \
-        ("No default gateway on host/container, "
-         "cannot determine primary interface")
-    default_gw_index = gateways['default'].keys()[0]
-    # gateways[default_gw_index] has the format (example):
-    # [('10.15.32.1', 'en0', True)]
-    interface_name = gateways[default_gw_index][0][1]
-    return interface_name
-
-
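-# Example (illustrative, reusing the gateway example above): with the default
-# gateway 10.15.32.1 on en0, get_my_primary_local_ipv4() returns en0's IPv4
-# address, while get_my_primary_local_ipv4(inter_core_subnet='10.15.32.0/24')
-# returns the first local IPv4 address in that subnet, or None.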
-def get_my_primary_local_ipv4(inter_core_subnet=None, ifname=None):
-    if not inter_core_subnet:
-        return _get_my_primary_local_ipv4(ifname)
-    # My IP should belong to the specified subnet
-    for iface in ni.interfaces():
-        addresses = ni.ifaddresses(iface)
-        if AF_INET in addresses:
-            m_ip = addresses[AF_INET][0]['addr']
-            _ip = netaddr.IPAddress(m_ip).value
-            m_network = netaddr.IPNetwork(inter_core_subnet)
-            if _ip >= m_network.first and _ip <= m_network.last:
-                return m_ip
-    return None
-
-
-def get_my_primary_interface(pon_subnet=None):
-    if not pon_subnet:
-        return _get_my_primary_interface()
-    # My interface should have an IP that belongs to the specified subnet
-    for iface in ni.interfaces():
-        addresses = ni.ifaddresses(iface)
-        if AF_INET in addresses:
-            m_ip = addresses[AF_INET][0]['addr']
-            m_ip = netaddr.IPAddress(m_ip).value
-            m_network = netaddr.IPNetwork(pon_subnet)
-            if m_ip >= m_network.first and m_ip <= m_network.last:
-                return iface
-    return None
-
-
-def _get_my_primary_local_ipv4(ifname=None):
-    try:
-        ifname = get_my_primary_interface() if ifname is None else ifname
-        addresses = ni.ifaddresses(ifname)
-        ipv4 = addresses[AF_INET][0]['addr']
-        return ipv4
-    except Exception as e:
-        return None
-
-if __name__ == '__main__':
-    print get_my_primary_local_ipv4()
diff --git a/python/adapters/common/utils/ordered_weakvalue_dict.py b/python/adapters/common/utils/ordered_weakvalue_dict.py
deleted file mode 100644
index 9ea739a..0000000
--- a/python/adapters/common/utils/ordered_weakvalue_dict.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from _weakref import ref
-from weakref import KeyedRef
-from collections import OrderedDict
-
-
-class OrderedWeakValueDict(OrderedDict):
-    """
-    Modified OrderedDict to use weak references as values. Entries disappear
-    automatically if the referred value has no more strong reference pointing
-    to it.
-
-    Warning, this is not a complete implementation, only what is needed for
-    now. See test_ordered_wealvalue_dict.py for the tested behavior.
-    """
-    def __init__(self, *args, **kw):
-        def remove(wr, selfref=ref(self)):
-            self = selfref()
-            if self is not None:
-                super(OrderedWeakValueDict, self).__delitem__(wr.key)
-        self._remove = remove
-        super(OrderedWeakValueDict, self).__init__(*args, **kw)
-
-    def __setitem__(self, key, value):
-        super(OrderedWeakValueDict, self).__setitem__(
-            key, KeyedRef(value, self._remove, key))
-
-    def __getitem__(self, key):
-        o = super(OrderedWeakValueDict, self).__getitem__(key)()
-        if o is None:
-            raise KeyError, key
-        else:
-            return o
-
diff --git a/python/adapters/docker/Dockerfile.adapter_ponsim_olt b/python/adapters/docker/Dockerfile.adapter_ponsim_olt
deleted file mode 100644
index 209200d..0000000
--- a/python/adapters/docker/Dockerfile.adapter_ponsim_olt
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2016 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-ARG TAG=latest
-ARG REGISTRY=
-ARG REPOSITORY=
-
-FROM ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} as protos
-FROM ${REGISTRY}${REPOSITORY}voltha-base:${TAG}
-
-MAINTAINER Voltha Community <info@opennetworking.org>
-
-# Bundle app source
-RUN mkdir /adapters && touch /adapters/__init__.py
-ENV PYTHONPATH=/adapters
-COPY common /adapters/adapters/common
-COPY kafka /adapters/adapters/kafka
-COPY ./*.py /adapters/adapters/
-#COPY pki /voltha/pki
-COPY ponsim_olt /adapters/adapters/ponsim_olt
-RUN touch /adapters/adapters/__init__.py
-
-
-# Copy in the generated GRPC proto code
-COPY --from=protos /protos/voltha /adapters/adapters/protos
-COPY --from=protos /protos/google/api /adapters/adapters/protos/third_party/google/api
-COPY protos/third_party/__init__.py /adapters/adapters/protos/third_party
-RUN touch /adapters/adapters/protos/__init__.py
-RUN touch /adapters/adapters/protos/third_party/google/__init__.py
-
-# Exposing process and default entry point
-# CMD ["python", "/adapters/ponsim_olt/main.py"]
diff --git a/python/adapters/docker/Dockerfile.adapter_ponsim_onu b/python/adapters/docker/Dockerfile.adapter_ponsim_onu
deleted file mode 100644
index d0d3e36..0000000
--- a/python/adapters/docker/Dockerfile.adapter_ponsim_onu
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2016 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-ARG TAG=latest
-ARG REGISTRY=
-ARG REPOSITORY=
-
-FROM ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} as protos
-FROM ${REGISTRY}${REPOSITORY}voltha-base:${TAG}
-
-MAINTAINER Voltha Community <info@opennetworking.org>
-
-# Bundle app source
-RUN mkdir /adapters && touch /adapters/__init__.py
-ENV PYTHONPATH=/adapters
-COPY common /adapters/adapters/common
-COPY kafka /adapters/adapters/kafka
-COPY ./*.py /adapters/adapters/
-#COPY pki /voltha/pki
-COPY ponsim_onu /adapters/adapters/ponsim_onu
-RUN touch /adapters/adapters/__init__.py
-
-
-# Copy in the generated GRPC proto code
-COPY --from=protos /protos/voltha /adapters/adapters/protos
-COPY --from=protos /protos/google/api /adapters/adapters/protos/third_party/google/api
-COPY protos/third_party/__init__.py /adapters/adapters/protos/third_party
-RUN touch /adapters/adapters/protos/__init__.py
-RUN touch /adapters/adapters/protos/third_party/google/__init__.py
-
-# Exposing process and default entry point
-# CMD ["python", "/adapters/ponsim_onu/main.py"]
diff --git a/python/adapters/docker/Dockerfile.base b/python/adapters/docker/Dockerfile.base
deleted file mode 100644
index 1b912e0..0000000
--- a/python/adapters/docker/Dockerfile.base
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2016 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM ubuntu:xenial
-
-MAINTAINER Voltha Community <info@opennetworking.org>
-
-# Update to have latest images
-RUN apt-get update && \
-    apt-get install -y python python-pip openssl iproute2 libpcap-dev wget
-
-COPY requirements.txt /tmp/requirements.txt
-
-# pip install cython enum34 six && \
-# Install app dependencies
-RUN wget https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
-    dpkg -i *.deb && \
-    rm -f *.deb && \
-    apt-get update && \
-    apt-get install -y wget build-essential make gcc binutils python-dev libffi-dev libssl-dev git && \
-    pip install -r /tmp/requirements.txt && \
-    apt-get purge -y wget build-essential make gcc binutils python-dev libffi-dev libssl-dev git && \
-    apt-get autoremove -y
diff --git a/python/adapters/iadapter.py b/python/adapters/iadapter.py
index ee4d116..04cb303 100644
--- a/python/adapters/iadapter.py
+++ b/python/adapters/iadapter.py
@@ -22,13 +22,13 @@
 from twisted.internet import reactor
 from zope.interface import implementer
 
-from adapters.interface import IAdapterInterface
-from adapters.protos.adapter_pb2 import Adapter
-from adapters.protos.adapter_pb2 import AdapterConfig
-from adapters.protos.common_pb2 import AdminState
-from adapters.protos.common_pb2 import LogLevel
-from adapters.protos.device_pb2 import DeviceType, DeviceTypes
-from adapters.protos.health_pb2 import HealthStatus
+from interface import IAdapterInterface
+from python.protos.adapter_pb2 import Adapter
+from python.protos.adapter_pb2 import AdapterConfig
+from python.protos.common_pb2 import AdminState
+from python.protos.common_pb2 import LogLevel
+from python.protos.device_pb2 import DeviceType, DeviceTypes
+from python.protos.health_pb2 import HealthStatus
 
 log = structlog.get_logger()
 
@@ -273,7 +273,7 @@
         handler.send_proxied_message(proxy_address, msg)
 
     def process_inter_adapter_message(self, msg):
-        log.info('process-inter-adapter-message', msg=msg)
+        log.debug('process-inter-adapter-message', msg=msg)
         # Unpack the header to know which device needs to handle this message
         handler = None
         if msg.header.proxy_device_id:
@@ -286,18 +286,11 @@
         if handler:
             reactor.callLater(0, handler.process_inter_adapter_message, msg)
 
-    def receive_packet_out(self, logical_device_id, egress_port_no, msg):
-        def ldi_to_di(ldi):
-            di = self.logical_device_id_to_root_device_id.get(ldi)
-            if di is None:
-                logical_device = self.core_proxy.get_logical_device(ldi)
-                di = logical_device.root_device_id
-                self.logical_device_id_to_root_device_id[ldi] = di
-            return di
-
-        device_id = ldi_to_di(logical_device_id)
+    def receive_packet_out(self, device_id, egress_port_no, msg):
+        log.info('receive_packet_out', device_id=device_id,
+                 egress_port=egress_port_no, msg=msg)
         handler = self.devices_handlers[device_id]
-        handler.packet_out(egress_port_no, msg)
+        handler.packet_out(egress_port_no, msg.data)
 
 
 """
diff --git a/python/adapters/kafka/adapter_proxy.py b/python/adapters/kafka/adapter_proxy.py
index fad1093..769de80 100644
--- a/python/adapters/kafka/adapter_proxy.py
+++ b/python/adapters/kafka/adapter_proxy.py
@@ -21,9 +21,9 @@
 import structlog
 from uuid import uuid4
 from twisted.internet.defer import inlineCallbacks, returnValue
-from adapters.kafka.container_proxy import ContainerProxy
-from adapters.protos import third_party
-from adapters.protos.core_adapter_pb2 import InterAdapterHeader, \
+from container_proxy import ContainerProxy
+from python.protos import third_party
+from python.protos.core_adapter_pb2 import InterAdapterHeader, \
     InterAdapterMessage
 import time
 
diff --git a/python/adapters/kafka/adapter_request_facade.py b/python/adapters/kafka/adapter_request_facade.py
index 67f7869..cbae56d 100644
--- a/python/adapters/kafka/adapter_request_facade.py
+++ b/python/adapters/kafka/adapter_request_facade.py
@@ -22,11 +22,11 @@
 from twisted.internet.defer import inlineCallbacks
 from zope.interface import implementer
 
-from adapters.interface import IAdapterInterface
-from adapters.protos.core_adapter_pb2 import IntType, InterAdapterMessage
-from adapters.protos.device_pb2 import Device
-from adapters.protos.openflow_13_pb2 import FlowChanges, FlowGroups, Flows, \
-    FlowGroupChanges
+from python.adapters.interface import IAdapterInterface
+from python.protos.core_adapter_pb2 import IntType, InterAdapterMessage, StrType, Error, ErrorCode
+from python.protos.device_pb2 import Device
+from python.protos.openflow_13_pb2 import FlowChanges, FlowGroups, Flows, \
+    FlowGroupChanges, ofp_packet_out
 
 
 class MacAddressError(BaseException):
@@ -68,7 +68,8 @@
             device.Unpack(d)
             return True, self.adapter.adopt_device(d)
         else:
-            return False, d
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
 
     def get_ofp_device_info(self, device):
         d = Device()
@@ -76,17 +77,22 @@
             device.Unpack(d)
             return True, self.adapter.get_ofp_device_info(d)
         else:
-            return False, d
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
 
     def get_ofp_port_info(self, device, port_no):
         d = Device()
         if device:
             device.Unpack(d)
         else:
-            return (False, d)
-
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
         p = IntType()
-        port_no.Unpack(p)
+        if port_no:
+            port_no.Unpack(p)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="port-no-invalid")
 
         return True, self.adapter.get_ofp_port_info(d, p.val)
 
@@ -102,7 +108,8 @@
             device.Unpack(d)
             return True, self.adapter.disable_device(d)
         else:
-            return False, d
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
 
     def reenable_device(self, device):
         d = Device()
@@ -110,7 +117,8 @@
             device.Unpack(d)
             return True, self.adapter.reenable_device(d)
         else:
-            return False, d
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
 
     def reboot_device(self, device):
         d = Device()
@@ -118,7 +126,8 @@
             device.Unpack(d)
             return (True, self.adapter.reboot_device(d))
         else:
-            return (False, d)
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
 
     def download_image(self, device, request):
         return self.adapter.download_image(device, request)
@@ -144,7 +153,8 @@
             device.Unpack(d)
             return (True, self.adapter.delete_device(d))
         else:
-            return (False, d)
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
 
     def get_device_details(self, device):
         return self.adapter.get_device_details(device)
@@ -154,8 +164,8 @@
         if device:
             device.Unpack(d)
         else:
-            return (False, d)
-
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
         f = Flows()
         if flows:
             flows.Unpack(f)
@@ -171,8 +181,8 @@
         if device:
             device.Unpack(d)
         else:
-            return (False, d)
-
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
         f = FlowChanges()
         if flow_changes:
             flow_changes.Unpack(f)
@@ -194,6 +204,33 @@
         if msg:
             msg.Unpack(m)
         else:
-            return (False, m)
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="msg-invalid")
 
         return (True, self.adapter.process_inter_adapter_message(m))
+
+
+    def receive_packet_out(self, deviceId, outPort, packet):
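+        # Each argument arrives packed in a google.protobuf.Any from the
+        # inter-container transport; Unpack() it into its concrete type and
+        # reject the request if a required argument is missing.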
+        d_id = StrType()
+        if deviceId:
+            deviceId.Unpack(d_id)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="deviceid-invalid")
+
+        op = IntType()
+        if outPort:
+            outPort.Unpack(op)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="outport-invalid")
+
+        p = ofp_packet_out()
+        if packet:
+            packet.Unpack(p)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="packet-invalid")
+
+        return (True, self.adapter.receive_packet_out(d_id.val, op.val, p))
+
diff --git a/python/adapters/kafka/container_proxy.py b/python/adapters/kafka/container_proxy.py
index 79918cd..8c4e828 100644
--- a/python/adapters/kafka/container_proxy.py
+++ b/python/adapters/kafka/container_proxy.py
@@ -23,9 +23,9 @@
 from twisted.python import failure
 from zope.interface import implementer
 
-from adapters.common.utils.deferred_utils import DeferredWithTimeout, \
+from python.common.utils.deferred_utils import DeferredWithTimeout, \
     TimeOutError
-from adapters.common.utils.registry import IComponent
+from python.common.utils.registry import IComponent
 
 log = structlog.get_logger()
 
diff --git a/python/adapters/kafka/core_proxy.py b/python/adapters/kafka/core_proxy.py
index cc3f081..4bab30d 100644
--- a/python/adapters/kafka/core_proxy.py
+++ b/python/adapters/kafka/core_proxy.py
@@ -21,11 +21,11 @@
 from google.protobuf.message import Message
 from twisted.internet.defer import inlineCallbacks, returnValue
 
-from adapters.kafka.container_proxy import ContainerProxy
-from adapters.protos.common_pb2 import ID, ConnectStatus, OperStatus
-from adapters.protos.core_adapter_pb2 import StrType, BoolType, IntType
-from adapters.protos.device_pb2 import Device, Ports
-from adapters.protos.voltha_pb2 import CoreInstance
+from container_proxy import ContainerProxy
+from python.protos.common_pb2 import ID, ConnectStatus, OperStatus
+from python.protos.core_adapter_pb2 import StrType, BoolType, IntType, Packet
+from python.protos.device_pb2 import Device, Ports
+from python.protos.voltha_pb2 import CoreInstance
 
 log = structlog.get_logger()
 
@@ -243,7 +243,8 @@
         b = BoolType()
         b.val = init
         res = yield self.invoke(rpc="DevicePMConfigUpdate",
-                                device_pm_config=device_pm_config, init=b)
+                                device_pm_config=device_pm_config,
+                                init=b)
         returnValue(res)
 
     @ContainerProxy.wrap_request(None)
@@ -252,7 +253,8 @@
         log.debug("port_created")
         proto_id = ID()
         proto_id.id = device_id
-        res = yield self.invoke(rpc="PortCreated", device_id=proto_id,
+        res = yield self.invoke(rpc="PortCreated",
+                                device_id=proto_id,
                                 port=port)
         returnValue(res)
 
@@ -274,5 +276,16 @@
     def image_download_deleted(img_dnld):
         raise NotImplementedError()
 
-    def packet_in(device_id, egress_port_no, packet):
-        raise NotImplementedError()
+    @ContainerProxy.wrap_request(None)
+    @inlineCallbacks
+    def send_packet_in(self, device_id, port, packet):
+        log.debug("send_packet_in")
+        proto_id = ID()
+        proto_id.id = device_id
+        p = IntType()
+        p.val = port
+        pac = Packet()
+        pac.payload = packet
+        res = yield self.invoke(rpc="PacketIn",
+                                device_id=proto_id,
+                                port=p,
+                                packet=pac)
+        returnValue(res)
diff --git a/python/adapters/kafka/event_bus_publisher.py b/python/adapters/kafka/event_bus_publisher.py
index 011fdea..89b3385 100644
--- a/python/adapters/kafka/event_bus_publisher.py
+++ b/python/adapters/kafka/event_bus_publisher.py
@@ -25,7 +25,7 @@
 from google.protobuf.message import Message
 from simplejson import dumps
 
-from adapters.common.event_bus import EventBusClient
+from python.common.event_bus import EventBusClient
 
 log = structlog.get_logger()
 
diff --git a/python/adapters/kafka/kafka_inter_container_library.py b/python/adapters/kafka/kafka_inter_container_library.py
index 3f6f5eb..1d2b05c 100644
--- a/python/adapters/kafka/kafka_inter_container_library.py
+++ b/python/adapters/kafka/kafka_inter_container_library.py
@@ -25,10 +25,10 @@
     DeferredQueue, gatherResults
 from zope.interface import implementer
 
-from adapters.common.utils import asleep
-from adapters.common.utils.registry import IComponent
-from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
-from adapters.protos.core_adapter_pb2 import MessageType, Argument, \
+from python.common.utils import asleep
+from python.common.utils.registry import IComponent
+from kafka_proxy import KafkaProxy, get_kafka_proxy
+from python.protos.core_adapter_pb2 import MessageType, Argument, \
     InterContainerRequestBody, InterContainerMessage, Header, \
     InterContainerResponseBody
 
diff --git a/python/adapters/kafka/kafka_proxy.py b/python/adapters/kafka/kafka_proxy.py
index c11caa7..6dcb10f 100644
--- a/python/adapters/kafka/kafka_proxy.py
+++ b/python/adapters/kafka/kafka_proxy.py
@@ -23,9 +23,9 @@
 from twisted.internet.defer import inlineCallbacks, returnValue
 from zope.interface import implementer
 
-from adapters.common.utils.consulhelpers import get_endpoint_from_consul
-from adapters.common.utils.registry import IComponent
-from adapters.kafka.event_bus_publisher import EventBusPublisher
+from python.common.utils.consulhelpers import get_endpoint_from_consul
+from python.common.utils.registry import IComponent
+from event_bus_publisher import EventBusPublisher
 
 log = get_logger()
 
diff --git a/python/adapters/ponsim_olt/main.py b/python/adapters/ponsim_olt/main.py
index 569e284..09b78fc 100755
--- a/python/adapters/ponsim_olt/main.py
+++ b/python/adapters/ponsim_olt/main.py
@@ -29,22 +29,22 @@
 from twisted.internet.task import LoopingCall
 from zope.interface import implementer
 
-from adapters.common.structlog_setup import setup_logging, update_logging
-from adapters.common.utils.asleep import asleep
-from adapters.common.utils.deferred_utils import TimeOutError
-from adapters.common.utils.dockerhelpers import get_my_containers_name
-from adapters.common.utils.nethelpers import get_my_primary_local_ipv4, \
+from python.common.structlog_setup import setup_logging, update_logging
+from python.common.utils.asleep import asleep
+from python.common.utils.deferred_utils import TimeOutError
+from python.common.utils.dockerhelpers import get_my_containers_name
+from python.common.utils.nethelpers import get_my_primary_local_ipv4, \
     get_my_primary_interface
-from adapters.common.utils.registry import registry, IComponent
-from adapters.kafka.adapter_proxy import AdapterProxy
-from adapters.kafka.adapter_request_facade import AdapterRequestFacade
-from adapters.kafka.core_proxy import CoreProxy
-from adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
+from python.common.utils.registry import registry, IComponent
+from python.adapters.kafka.adapter_proxy import AdapterProxy
+from python.adapters.kafka.adapter_request_facade import AdapterRequestFacade
+from python.adapters.kafka.core_proxy import CoreProxy
+from python.adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
     get_messaging_proxy
-from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
-from adapters.ponsim_olt.ponsim_olt import PonSimOltAdapter
-from adapters.protos import third_party
-from adapters.protos.adapter_pb2 import AdapterConfig
+from python.adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from ponsim_olt import PonSimOltAdapter
+from python.protos import third_party
+from python.protos.adapter_pb2 import AdapterConfig
 
 _ = third_party
 
diff --git a/python/adapters/ponsim_olt/ponsim_olt.py b/python/adapters/ponsim_olt/ponsim_olt.py
index 52fb63b..df834e5 100644
--- a/python/adapters/ponsim_olt/ponsim_olt.py
+++ b/python/adapters/ponsim_olt/ponsim_olt.py
@@ -23,6 +23,8 @@
 import structlog
 from google.protobuf.empty_pb2 import Empty
 from google.protobuf.json_format import MessageToDict
+from scapy.layers.inet import Raw
+import json
 from google.protobuf.message import Message
 from grpc._channel import _Rendezvous
 from scapy.layers.l2 import Ether, Dot1Q
@@ -31,25 +33,25 @@
 from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.internet.task import LoopingCall
 
-from adapters.common.frameio.frameio import BpfProgramFilter, hexify
-from adapters.common.utils.asleep import asleep
-from adapters.common.utils.registry import registry
-from adapters.iadapter import OltAdapter
-from adapters.kafka.kafka_proxy import get_kafka_proxy
-from adapters.protos import ponsim_pb2
-from adapters.protos import third_party
-from adapters.protos.common_pb2 import OperStatus, ConnectStatus
-from adapters.protos.core_adapter_pb2 import SwitchCapability, PortCapability, \
+from python.adapters.common.frameio.frameio import BpfProgramFilter, hexify
+from python.common.utils.asleep import asleep
+from python.common.utils.registry import registry
+from python.adapters.iadapter import OltAdapter
+from python.adapters.kafka.kafka_proxy import get_kafka_proxy
+from python.protos import ponsim_pb2
+from python.protos import third_party
+from python.protos.common_pb2 import OperStatus, ConnectStatus
+from python.protos.core_adapter_pb2 import SwitchCapability, PortCapability, \
     InterAdapterMessageType, InterAdapterResponseBody
-from adapters.protos.device_pb2 import Port, PmConfig, PmConfigs
-from adapters.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
-from adapters.protos.logical_device_pb2 import LogicalPort
-from adapters.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+from python.protos.device_pb2 import Port, PmConfig, PmConfigs
+from python.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
+from python.protos.logical_device_pb2 import LogicalPort
+from python.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
     OFPPF_1GB_FD, \
     OFPC_GROUP_STATS, OFPC_PORT_STATS, OFPC_TABLE_STATS, OFPC_FLOW_STATS, \
     ofp_switch_features, ofp_desc
-from adapters.protos.openflow_13_pb2 import ofp_port
-from adapters.protos.ponsim_pb2 import FlowTable, PonSimFrame, PonSimMetricsRequest
+from python.protos.openflow_13_pb2 import ofp_port
+from python.protos.ponsim_pb2 import FlowTable, PonSimFrame, PonSimMetricsRequest
 
 _ = third_party
 log = structlog.get_logger()
@@ -157,33 +159,11 @@
         self.device = device
         self.lc = None
 
+    # TODO: Implement code to send to kafka cluster directly instead of
+    # going through the voltha core.
     def send_alarm(self, context_data, alarm_data):
-        try:
-            current_context = {}
-            for key, value in context_data.__dict__.items():
-                current_context[key] = str(value)
-
-            alarm_event = self.adapter.adapter_agent.create_alarm(
-                resource_id=self.device.id,
-                description="{}.{} - {}".format(self.adapter.name,
-                                                self.device.id,
-                                                alarm_data[
-                                                    'description']) if 'description' in alarm_data else None,
-                type=alarm_data['type'] if 'type' in alarm_data else None,
-                category=alarm_data[
-                    'category'] if 'category' in alarm_data else None,
-                severity=alarm_data[
-                    'severity'] if 'severity' in alarm_data else None,
-                state=alarm_data['state'] if 'state' in alarm_data else None,
-                raised_ts=alarm_data['ts'] if 'ts' in alarm_data else 0,
-                context=current_context
-            )
-
-            self.adapter.adapter_agent.submit_alarm(self.device.id,
-                                                    alarm_event)
-
-        except Exception as e:
-            log.exception('failed-to-send-alarm', e=e)
+        log.debug("send-alarm-not-implemented")
+        return
 
 
 class PonSimOltAdapter(OltAdapter):
@@ -379,6 +359,27 @@
     def reconcile(self, device):
         self.log.info('reconciling-OLT-device')
 
+    def _rcv_frame(self, frame):
+        pkt = Ether(frame)
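+        # A Q-in-Q frame is customer traffic: pop both tags and report the
+        # inner VLAN id (cvid) to the core as the packet-in port. A
+        # single-tagged frame whose payload is Raw carries JSON alarm data.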
+
+        if pkt.haslayer(Dot1Q):
+            outer_shim = pkt.getlayer(Dot1Q)
+
+            if isinstance(outer_shim.payload, Dot1Q):
+                inner_shim = outer_shim.payload
+                cvid = inner_shim.vlan
+                popped_frame = (
+                        Ether(src=pkt.src, dst=pkt.dst, type=inner_shim.type) /
+                        inner_shim.payload
+                )
+                self.log.info('sending-packet-in', device_id=self.device_id,
+                              port=cvid)
+                self.core_proxy.send_packet_in(device_id=self.device_id,
+                                               port=cvid,
+                                               packet=str(popped_frame))
+            elif pkt.haslayer(Raw):
+                raw_data = json.loads(pkt.getlayer(Raw).load)
+                self.alarms.send_alarm(self, raw_data)
+
     @inlineCallbacks
     def rcv_grpc(self):
         """
@@ -504,7 +505,7 @@
         out_port = self.nni_port.port_no if egress_port == self.nni_port.port_no else 1
 
         # send over grpc stream
-        stub = ponsim_pb2.PonSimStub(self.get_channel())
+        stub = ponsim_pb2.PonSimStub(self.channel)
         frame = PonSimFrame(id=self.device_id, payload=str(out_pkt),
                             out_port=out_port)
         stub.SendFrame(frame)
diff --git a/python/adapters/ponsim_onu/main.py b/python/adapters/ponsim_onu/main.py
index 3f18e50..d6418e9 100755
--- a/python/adapters/ponsim_onu/main.py
+++ b/python/adapters/ponsim_onu/main.py
@@ -29,22 +29,22 @@
 from twisted.internet.task import LoopingCall
 from zope.interface import implementer
 
-from adapters.common.structlog_setup import setup_logging, update_logging
-from adapters.common.utils.asleep import asleep
-from adapters.common.utils.deferred_utils import TimeOutError
-from adapters.common.utils.dockerhelpers import get_my_containers_name
-from adapters.common.utils.nethelpers import get_my_primary_local_ipv4, \
+from python.common.structlog_setup import setup_logging, update_logging
+from python.common.utils.asleep import asleep
+from python.common.utils.deferred_utils import TimeOutError
+from python.common.utils.dockerhelpers import get_my_containers_name
+from python.common.utils.nethelpers import get_my_primary_local_ipv4, \
     get_my_primary_interface
-from adapters.common.utils.registry import registry, IComponent
-from adapters.kafka.adapter_proxy import AdapterProxy
-from adapters.kafka.adapter_request_facade import AdapterRequestFacade
-from adapters.kafka.core_proxy import CoreProxy
-from adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
+from python.common.utils.registry import registry, IComponent
+from python.adapters.kafka.adapter_proxy import AdapterProxy
+from python.adapters.kafka.adapter_request_facade import AdapterRequestFacade
+from python.adapters.kafka.core_proxy import CoreProxy
+from python.adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
     get_messaging_proxy
-from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
-from adapters.ponsim_onu.ponsim_onu import PonSimOnuAdapter
-from adapters.protos import third_party
-from adapters.protos.adapter_pb2 import AdapterConfig
+from python.adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from ponsim_onu import PonSimOnuAdapter
+from python.protos import third_party
+from python.protos.adapter_pb2 import AdapterConfig
 
 _ = third_party
 
diff --git a/python/adapters/ponsim_onu/ponsim_onu.py b/python/adapters/ponsim_onu/ponsim_onu.py
index e15d0a9..eb4d716 100644
--- a/python/adapters/ponsim_onu/ponsim_onu.py
+++ b/python/adapters/ponsim_onu/ponsim_onu.py
@@ -29,20 +29,20 @@
     returnValue, Deferred
 from twisted.internet.task import LoopingCall
 
-from adapters.common.utils.asleep import asleep
-from adapters.iadapter import OnuAdapter
-from adapters.kafka.kafka_proxy import get_kafka_proxy
-from adapters.protos import third_party
-from adapters.protos.common_pb2 import OperStatus, ConnectStatus, AdminState
-from adapters.protos.core_adapter_pb2 import PortCapability, \
+from python.common.utils.asleep import asleep
+from python.adapters.iadapter import OnuAdapter
+from python.adapters.kafka.kafka_proxy import get_kafka_proxy
+from python.protos import third_party
+from python.protos.common_pb2 import OperStatus, ConnectStatus, AdminState
+from python.protos.core_adapter_pb2 import PortCapability, \
     InterAdapterMessageType, InterAdapterResponseBody
-from adapters.protos.device_pb2 import Port, PmConfig, PmConfigs
-from adapters.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
-from adapters.protos.logical_device_pb2 import LogicalPort
-from adapters.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+from python.protos.device_pb2 import Port, PmConfig, PmConfigs
+from python.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
+from python.protos.logical_device_pb2 import LogicalPort
+from python.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
     OFPPF_1GB_FD
-from adapters.protos.openflow_13_pb2 import ofp_port
-from adapters.protos.ponsim_pb2 import FlowTable, PonSimMetricsRequest, PonSimMetrics
+from python.protos.openflow_13_pb2 import ofp_port
+from python.protos.ponsim_pb2 import FlowTable, PonSimMetricsRequest, PonSimMetrics
 
 _ = third_party
 log = structlog.get_logger()
diff --git a/python/adapters/requirements.txt b/python/adapters/requirements.txt
deleted file mode 100755
index a0641b2..0000000
--- a/python/adapters/requirements.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-argparse==1.2.1
-arrow==0.10.0
-bitstring==3.1.5
-cmd2==0.7.0
-colorama==0.3.9
-cython==0.24.1
-decorator==4.1.2
-docker-py==1.10.6
-fluent-logger==0.6.0
-grpc==0.3.post19
-grpcio==1.3.5
-grpcio-tools==1.3.5
-hash_ring==1.3.1
-hexdump==3.3
-jinja2==2.8
-jsonpatch==1.16
-kafka_python==1.3.5
-klein==17.10.0
-kubernetes==5.0.0
-netaddr==0.7.19
-networkx==2.0
-nose==1.3.7
-nose-exclude==0.5.0
-nose-testconfig==0.10
-mock==2.0.0
-netifaces==0.10.6
-pcapy==0.11.1
-pep8==1.7.1
-pep8-naming>=0.3.3
-protobuf==3.3.0
-protobuf-to-dict==0.1.0
-pyflakes==1.6.0
-pylint==1.7.6
-#pypcap>=1.1.5
-pyOpenSSL==17.3.0
-PyYAML==3.12
-requests==2.18.4
-scapy==2.3.3
-service-identity==17.0.0
-simplejson==3.12.0
-jsonschema==2.6.0
-six==1.11.0
-structlog==17.2.0
-termcolor==1.1.0
-transitions==0.6.4
-treq==17.8.0
-Twisted==17.9.0
-txaioetcd==0.3.0
-urllib3==1.22
-pyang==1.7.3
-lxml==3.6.4
-nosexcover==1.0.11
-zmq==0.0.0
-pyzmq==16.0.3
-txZMQ==0.8.0
-ncclient==0.5.3
-xmltodict==0.11.0
-dicttoxml==1.7.4
-etcd3==0.7.0
-pyparsing==2.2.0
-packaging==17.1
-
-# python-consul>=0.6.1  we need the pre-released version for now, because 0.6.1 does not
-# yet support Twisted. Once this is released, it will be the 0.6.2 version
-git+https://github.com/cablehead/python-consul.git
-
-# Twisted Python kafka client
-git+https://github.com/ciena/afkak.git
diff --git a/python/cli/README.md b/python/cli/README.md
new file mode 100644
index 0000000..c810df4
--- /dev/null
+++ b/python/cli/README.md
@@ -0,0 +1,14 @@
+## CLI (~/cli)
+
+* Add auto-completion for most common args like device and logical device ids
+* Add consistent argument checking
+* Unify code that retrieves data from gRPC
+* Unify code that prints out data/response, to allow:
+  * Selectable output mode:
+    * JSON
+    * Tabular
+* Organize history per sub-context so that each context shows only the
+  commands entered in that context
+* Metaprogramming [BIG ONE]: Make a large part of the commands come from
+  annotations embedded in the protobuf files and have the corresponding
+  handlers auto-generated by protoc
+* Package the CLI as a docker container and bake it into the composition
diff --git a/python/common/frameio/__init__.py b/python/cli/__init__.py
similarity index 100%
rename from python/common/frameio/__init__.py
rename to python/cli/__init__.py
diff --git a/python/cli/alarm_filters.py b/python/cli/alarm_filters.py
new file mode 100644
index 0000000..ed2af32
--- /dev/null
+++ b/python/cli/alarm_filters.py
@@ -0,0 +1,238 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Alarm filter CLI commands
+"""
+from optparse import make_option, OptionValueError
+
+from cmd2 import Cmd, options
+from google.protobuf.empty_pb2 import Empty
+
+from table import print_pb_list_as_table
+from python.protos import third_party
+from python.protos import voltha_pb2
+from python.protos.events_pb2 import AlarmEventType, AlarmEventSeverity, AlarmEventCategory
+
+_ = third_party
+
+
+class AlarmFiltersCli(Cmd):
+    def __init__(self, get_stub):
+        Cmd.__init__(self)
+        self.get_stub = get_stub
+        self.prompt = '(' + self.colorize(
+            self.colorize('alarm_filters', 'red'), 'bold') + ') '
+
+    def cmdloop(self):
+        self._cmdloop()
+
+    def help_show(self):
+        self.poutput(
+'''
+Display the list of configured filters.
+
+Valid options:
+
+-i FILTER_ID | --filter-id=FILTER_ID                Display the filter rules for a specific filter id (OPTIONAL)
+
+'''
+        )
+
+    @options([
+        make_option('-i', '--filter-id', action="store", dest='filter_id')
+    ])
+    def do_show(self, line, opts):
+        stub = self.get_stub()
+
+        if not opts.filter_id:
+            result = stub.ListAlarmFilters(Empty())
+            print_pb_list_as_table("Alarm Filters:", result.filters, {}, self.poutput)
+        else:
+            result = stub.GetAlarmFilter(voltha_pb2.ID(id=opts.filter_id))
+            print_pb_list_as_table("Rules for Filter ID = {}:".format(opts.filter_id),
+                                   result.rules, {}, self.poutput)
+
+    @staticmethod
+    def construct_rule(raw_rule):
+        rule = dict()
+
+        rule_kv = raw_rule.strip().split(':')
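+        # e.g. "severity:indeterminate" -> {'key': 'severity', 'value': 'indeterminate'}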
+
+        if len(rule_kv) == 2:
+            rule['key'] = rule_kv[0].lower()
+            rule['value'] = rule_kv[1].lower()
+        else:
+            raise OptionValueError("Error: A rule must be a colon separated key/value pair")
+
+        return rule
+
+    def parse_filter_rules(option, opt_str, value, parser):
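+        # optparse callback: consume this option's value plus any following
+        # bare arguments (up to the next -/-- option) as additional rules.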
+        rules = getattr(parser.values, option.dest)
+        if rules is None:
+            rules = list()
+            rules.append(AlarmFiltersCli.construct_rule(value))
+
+            for arg in parser.rargs:
+                if (arg[:2] == "--" and len(arg) > 2) or (arg[:1] == "-" and len(arg) > 1 and arg[1] != "-"):
+                    break
+                else:
+                    rules.append(AlarmFiltersCli.construct_rule(arg))
+
+            setattr(parser.values, option.dest, rules)
+        else:
+            raise OptionValueError('Warning: The filter rule option can only be specified once')
+
+    def help_create(self):
+        types = list(
+            k for k, v in
+            AlarmEventType.DESCRIPTOR.enum_values_by_name.items())
+        categories = list(
+            k for k, v in
+            AlarmEventCategory.DESCRIPTOR.enum_values_by_name.items())
+        severities = list(
+            k for k, v in
+            AlarmEventSeverity.DESCRIPTOR.enum_values_by_name.items())
+
+        alarm_types = types
+        alarm_categories = categories
+        alarm_severities = severities
+
+        usage = '''
+Create a new alarm filter.
+
+Valid options:
+
+-r rule:value ... | --filter-rules rule:value ...   Specify one or more filter rules as key/value pairs (REQUIRED)
+
+Valid rule keys and expected values:
+
+id          : Identifier of an incoming alarm
+type        : Type of an incoming alarm {}
+category    : Category of an incoming alarm {}
+severity    : Severity of an incoming alarm {}
+resource_id : Resource identifier of an incoming alarm
+device_id   : Device identifier of an incoming alarm
+
+Example:
+
+# Filter any alarm that matches the following criteria
+
+create -r type:environment severity:indeterminate
+create -r device_id:754f9dcbe4a6
+
+'''.format(alarm_types, alarm_categories, alarm_severities)
+
+        self.poutput(usage)
+
+    @options([
+        make_option('-r', '--filter-rules', help='<key>:<value>...', action="callback",
+                    callback=parse_filter_rules, type='string', dest='filter_rules'),
+    ])
+    def do_create(self, line, opts):
+        if opts.filter_rules:
+            stub = self.get_stub()
+            result = stub.CreateAlarmFilter(voltha_pb2.AlarmFilter(rules=opts.filter_rules))
+            print_pb_list_as_table("Rules for Filter ID = {}:".format(result.id),
+                                   result.rules, {}, self.poutput)
+
+    def help_delete(self):
+        self.poutput(
+'''
+Delete a specific alarm filter entry.
+
+Valid options:
+
+-i FILTER_ID | --filter-id=FILTER_ID                Indicate the alarm filter identifier to delete (REQUIRED)
+
+'''
+        )
+
+    @options([
+        make_option('-i', '--filter-id', action="store", dest='filter_id')
+    ])
+    def do_delete(self, line, opts):
+        if not opts.filter_id:
+            self.poutput(self.colorize('Error: ', 'red') + 'Specify ' + \
+                         self.colorize(self.colorize('"filter id"', 'blue'),
+                                       'bold') + ' to delete')
+            return
+
+        stub = self.get_stub()
+        stub.DeleteAlarmFilter(voltha_pb2.ID(id=opts.filter_id))
+
+    def help_update(self):
+        types = list(
+            k for k, v in
+            AlarmEventType.DESCRIPTOR.enum_values_by_name.items())
+        categories = list(
+            k for k, v in
+            AlarmEventCategory.DESCRIPTOR.enum_values_by_name.items())
+        severities = list(
+            k for k, v in
+            AlarmEventSeverity.DESCRIPTOR.enum_values_by_name.items())
+
+        alarm_types = types
+        alarm_categories = categories
+        alarm_severities = severities
+
+        usage = '''
+Update the filter rules for an existing alarm filter.
+
+Valid options:
+
+-i FILTER_ID | --filter-id=FILTER_ID                Indicate the alarm filter identifier to update (REQUIRED)
+-r rule:value ... | --filter-rules rule:value ...   Specify one or more filter rules as key/value pairs (REQUIRED)
+
+Valid rule keys and expected values:
+
+id          : Identifier of an incoming alarm
+type        : Type of an incoming alarm {}
+category    : Category of an incoming alarm {}
+severity    : Severity of an incoming alarm {}
+resource_id : Resource identifier of an incoming alarm
+device_id   : Device identifier of an incoming alarm
+
+Example:
+
+# Filter any alarm that matches the following criteria
+
+update -i 9da115b900bc -r type:environment severity:indeterminate resource_id:1554b0517a07
+
+'''.format(alarm_types, alarm_categories, alarm_severities)
+
+        self.poutput(usage)
+
+    @options([
+        make_option('-r', '--filter-rules', help='<key>:<value>...', action="callback",
+                    callback=parse_filter_rules, type='string', dest='filter_rules'),
+        make_option('-i', '--filter-id', action="store", dest='filter_id')
+    ])
+    def do_update(self, line, opts):
+        if not opts.filter_id:
+            self.poutput(self.colorize('Error: ', 'red') + 'Specify ' + \
+                         self.colorize(self.colorize('"filter id"', 'blue'),
+                                       'bold') + ' to update')
+            return
+
+        if opts.filter_rules:
+            stub = self.get_stub()
+            result = stub.UpdateAlarmFilter(
+                voltha_pb2.AlarmFilter(id=opts.filter_id, rules=opts.filter_rules)
+            )
+            print_pb_list_as_table("Rules for Filter ID = {}:".format(result.id),
+                                   result.rules, {}, self.poutput)
diff --git a/python/cli/device.py b/python/cli/device.py
new file mode 100644
index 0000000..38ea835
--- /dev/null
+++ b/python/cli/device.py
@@ -0,0 +1,597 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Device level CLI commands
+"""
+from optparse import make_option
+from cmd2 import Cmd, options
+from simplejson import dumps
+
+from table import print_pb_as_table, print_pb_list_as_table
+from utils import print_flows, pb2dict, enum2name
+from python.protos import third_party
+
+_ = third_party
+from python.protos import voltha_pb2, common_pb2
+import sys
+import json
+from google.protobuf.json_format import MessageToDict
+
+# Since proto3 won't send fields that are set to 0/false/"", any object that
+# might have those values set in it needs to be replicated here so that the
+# fields can be adequately printed.
+
+
+class DeviceCli(Cmd):
+
+    def __init__(self, device_id, get_stub):
+        Cmd.__init__(self)
+        self.get_stub = get_stub
+        self.device_id = device_id
+        self.prompt = '(' + self.colorize(
+            self.colorize('device {}'.format(device_id), 'red'), 'bold') + ') '
+        self.pm_config_last = None
+        self.pm_config_dirty = False
+
+    def cmdloop(self):
+        self._cmdloop()
+
+    def get_device(self, depth=0):
+        stub = self.get_stub()
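+        # The requested depth travels as gRPC metadata ('get-depth'); callers
+        # in this module pass depth=-1 to retrieve the device in full detail.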
+        res = stub.GetDevice(voltha_pb2.ID(id=self.device_id),
+                             metadata=(('get-depth', str(depth)), ))
+        return res
+
+    do_exit = Cmd.do_quit
+
+    def do_quit(self, line):
+        if self.pm_config_dirty:
+            self.poutput("Uncommited changes for " + \
+                         self.colorize(
+                             self.colorize("perf_config,", "blue"),
+                             "bold") + " please either " + self.colorize(
+                             self.colorize("commit", "blue"), "bold") + \
+                         " or " + self.colorize(
+                             self.colorize("reset", "blue"), "bold") + \
+                         " your changes using " + \
+                         self.colorize(
+                             self.colorize("perf_config", "blue"), "bold"))
+            return False
+        else:
+            return self._STOP_AND_EXIT
+
+    def do_show(self, line):
+        """Show detailed device information"""
+        print_pb_as_table('Device {}'.format(self.device_id),
+                          self.get_device(depth=-1))
+
+    def do_ports(self, line):
+        """Show ports of device"""
+        device = self.get_device(depth=-1)
+        omit_fields = {
+        }
+        print_pb_list_as_table('Device ports:', device.ports,
+                               omit_fields, self.poutput)
+
+    def complete_perf_config(self, text, line, begidx, endidx):
+        sub_cmds = {"show", "set", "commit", "reset"}
+        sub_opts = {"-f", "-e", "-d", "-o"}
+        # Help the interpreter complete the parameters.
+        completions = []
+        if not self.pm_config_last:
+            device = self.get_device(depth=-1)
+            self.pm_config_last = device.pm_configs
+        m_names = [d.name for d in self.pm_config_last.metrics]
+        cur_cmd = line.strip().split(" ")
+        try:
+            if not text and len(cur_cmd) == 1:
+                completions = ("show", "set", "commit", "reset")
+            elif len(cur_cmd) == 2:
+                if "set" == cur_cmd[1]:
+                    completions = [d for d in sub_opts]
+                else:
+                    completions = [d for d in sub_cmds if d.startswith(text)]
+            elif len(cur_cmd) > 2 and cur_cmd[1] == "set":
+                if cur_cmd[len(cur_cmd)-1] == "-":
+                    completions = [list(d)[1] for d in sub_opts]
+                elif cur_cmd[len(cur_cmd)-1] == "-f":
+                    completions = ("\255","Please enter a sampling frequency in 10ths of a second")
+                elif cur_cmd[len(cur_cmd)-2] == "-f":
+                    completions = [d for d in sub_opts]
+                elif cur_cmd[len(cur_cmd)-1] in {"-e","-d","-o"}:
+                    if self.pm_config_last.grouped:
+                        pass
+                    else:
+                        completions = [d.name for d in self.pm_config_last.metrics]
+                elif cur_cmd[len(cur_cmd)-2] in {"-e","-d"}:
+                    if text and text not in m_names:
+                        completions = [d for d in m_names if d.startswith(text)]
+                    else:
+                        completions = [d for d in sub_opts]
+                elif cur_cmd[len(cur_cmd)-2] == "-o":
+                    if cur_cmd[len(cur_cmd)-1] in [d.name for d in self.pm_config_last.metrics]:
+                        completions = ("\255","Please enter a sampling frequency in 10ths of a second")
+                    else:
+                        completions = [d for d in m_names if d.startswith(text)]
+                elif cur_cmd[len(cur_cmd)-3] == "-o":
+                    completions = [d for d in sub_opts]
+        except:
+            e = sys.exc_info()
+            print(e)
+        return completions
+
+
+    def help_perf_config(self):
+        self.poutput(
+'''
+perf_config [show | set | commit | reset] [-f <default frequency>] [{-e <metric/group
+            name>}] [{-d <metric/group name>}] [{-o <metric/group name> <override
+            frequency>}]
+
+show: displays the performance configuration of the device
+set: changes the parameters specified with -e, -d, and -o
+reset: reverts any changes made since the last commit
+commit: commits any changes made, applying them to the device.
+
+-e: enable collection of the specified metric, more than one -e may be
+            specified.
+-d: disable collection of the specified metric, more than one -d may be
+            specified.
+-o: override the collection frequency of the specified metric, more than one -o
+            may be specified. Note that -o isn't valid unless
+            frequency_override is set to True for the device.
+
+Changes made by set are held locally until a commit or reset command is issued.
+A commit command will write the configuration to the device and it takes effect
+immediately. The reset command will undo any changes since the start of the
+device session.
+
+If grouped is true then the -d, -e and -o commands refer to groups and not
+individual metrics.
+'''
+        )
+
+    @options([
+        make_option('-f', '--default_freq', action="store", dest='default_freq',
+                    type='long', default=None),
+        make_option('-e', '--enable', action='append', dest='enable',
+                    default=None),
+        make_option('-d', '--disable', action='append', dest='disable',
+                    default=None),
+        make_option('-o', '--override', action='append', dest='override',
+                    nargs=2, default=None, type='string'),
+    ])
+    def do_perf_config(self, line, opts):
+        """Show and set the performance monitoring configuration of the device"""
+
+        device = self.get_device(depth=-1)
+        if not self.pm_config_last:
+            self.pm_config_last = device.pm_configs
+
+        # Ensure that a valid sub-command was provided
+        if line.strip() not in {"set", "show", "commit", "reset", ""}:
+            self.poutput(self.colorize('Error: ', 'red') +
+                         self.colorize(self.colorize(line.strip(), 'blue'),
+                                       'bold') + ' is not recognized')
+            return
+
+        # Ensure no options are provided when requesting to view the config
+        if line.strip() == "show" or line.strip() == "":
+            if opts.default_freq or opts.enable or opts.disable:
+                self.poutput(opts.disable)
+                self.poutput(self.colorize('Error: ', 'red') + 'use ' +
+                             self.colorize(self.colorize('"set"', 'blue'),
+                                           'bold') + ' to change settings')
+                return
+
+        if line.strip() == "set":  # Set the supplied values
+            metric_list = set()
+            if opts.enable is not None:
+                metric_list |= {metric for metric in opts.enable}
+            if opts.disable is not None:
+                metric_list |= {metric for metric in opts.disable}
+            if opts.override is not None:
+                metric_list |= {metric for metric, _ in opts.override}
+
+            # The default frequency
+            if opts.default_freq:
+                self.pm_config_last.default_freq = opts.default_freq
+                self.pm_config_dirty = True
+
+            # Field or group visibility
+            if self.pm_config_last.grouped:
+                for g in self.pm_config_last.groups:
+                    if opts.enable:
+                        if g.group_name in opts.enable:
+                            g.enabled = True
+                            self.pm_config_dirty = True
+                            metric_list.discard(g.group_name)
+                for g in self.pm_config_last.groups:
+                    if opts.disable:
+                        if g.group_name in opts.disable:
+                            g.enabled = False
+                            self.pm_config_dirty = True
+                            metric_list.discard(g.group_name)
+            else:
+                for m in self.pm_config_last.metrics:
+                    if opts.enable:
+                        if m.name in opts.enable:
+                            m.enabled = True
+                            self.pm_config_dirty = True
+                            metric_list.discard(m.name)
+                for m in self.pm_config_last.metrics:
+                    if opts.disable:
+                        if m.name in opts.disable:
+                            m.enabled = False
+                            self.pm_config_dirty = True
+                            metric_list.discard(m.name)
+
+            # Frequency overrides.
+            if opts.override:
+                if self.pm_config_last.freq_override:
+                    oo = dict()
+                    for o in opts.override:
+                        oo[o[0]] = o[1]
+                    if self.pm_config_last.grouped:
+                        for g in self.pm_config_last.groups:
+                            if g.group_name in oo:
+                                try:
+                                    g.group_freq = int(oo[g.group_name])
+                                    self.pm_config_dirty = True
+                                except ValueError:
+                                    self.poutput(self.colorize('Warning: ',
+                                                               'yellow') +
+                                                 self.colorize(oo[g.group_name],
+                                                               'blue') +
+                                                 " is not an integer... ignored")
+                                del oo[g.group_name]
+                                metric_list.discard(g.group_name)
+                    else:
+                        for m in self.pm_config_last.metrics:
+                            if m.name in oo:
+                                try:
+                                    m.sample_freq = int(oo[m.name])
+                                    self.pm_config_dirty = True
+                                except ValueError:
+                                    self.poutput(self.colorize('Warning: ',
+                                                               'yellow') +
+                                                 self.colorize(oo[m.name],
+                                                               'blue') +
+                                                 " is not an integer... ignored")
+                                del oo[m.name]
+                                metric_list.discard(m.name)
+
+                    # Anything left over did not match a known group/metric name
+                    if self.pm_config_last.grouped:
+                        field = 'group'
+                    else:
+                        field = 'metric'
+                    for o in oo:
+                        self.poutput(self.colorize('Warning: ', 'yellow') +
+                                     'the parameter' + ' ' +
+                                     self.colorize(o, 'blue') + ' is not ' +
+                                     'a ' + field + ' name... ignored')
+                    if oo:
+                        return
+
+                else:  # Frequency overrides not enabled
+                    self.poutput(self.colorize('Error: ', 'red') +
+                                 'Individual overrides are only ' +
+                                 'supported if ' +
+                                 self.colorize('freq_override', 'blue') +
+                                 ' is set to ' + self.colorize('True', 'blue'))
+                    return
+
+            if metric_list:
+                metric_name_list = ", ".join(str(metric) for metric in metric_list)
+                self.poutput(self.colorize('Error: ', 'red') +
+                             'Metric/Metric Group{} '.format('s' if len(metric_list) > 1 else '') +
+                             self.colorize(metric_name_list, 'blue') +
+                             ' {} not found'.format('were' if len(metric_list) > 1 else 'was'))
+                return
+
+            self.poutput("Success")
+            return
+
+        elif line.strip() == "commit" and self.pm_config_dirty:
+            stub = self.get_stub()
+            stub.UpdateDevicePmConfigs(self.pm_config_last)
+            self.pm_config_last = self.get_device(depth=-1).pm_configs
+            self.pm_config_dirty = False
+
+        elif line.strip() == "reset" and self.pm_config_dirty:
+            self.pm_config_last = self.get_device(depth=-1).pm_configs
+            self.pm_config_dirty = False
+
+        omit_fields = {'groups', 'metrics', 'id'}
+        print_pb_as_table('PM Config:', self.pm_config_last, omit_fields,
+                          self.poutput, show_nulls=True)
+        if self.pm_config_last.grouped:
+            #self.poutput("Supported metric groups:")
+            for g in self.pm_config_last.groups:
+                if self.pm_config_last.freq_override:
+                    omit_fields = {'metrics'}
+                else:
+                    omit_fields = {'group_freq', 'metrics'}
+                print_pb_as_table('', g, omit_fields, self.poutput,
+                                  show_nulls=True)
+                if g.enabled:
+                    state = 'enabled'
+                else:
+                    state = 'disabled'
+                print_pb_list_as_table(
+                    'Metric group {} is {}'.format(g.group_name,state),
+                    g.metrics, {'enabled', 'sample_freq'}, self.poutput,
+                    dividers=100, show_nulls=True)
+        else:
+            if self.pm_config_last.freq_override:
+                omit_fields = set()
+            else:
+                omit_fields = {'sample_freq'}
+            print_pb_list_as_table('Supported metrics:', self.pm_config_last.metrics,
+                                   omit_fields, self.poutput, dividers=100,
+                                   show_nulls=True)
+
+    def do_flows(self, line):
+        """Show flow table for device"""
+        device = pb2dict(self.get_device(-1))
+        print_flows(
+            'Device',
+            self.device_id,
+            type=device['type'],
+            flows=device['flows']['items'],
+            groups=device['flow_groups']['items']
+        )
+
+    def do_images(self, line):
+        """Show software images on the device"""
+        device = self.get_device(depth=-1)
+        omit_fields = set()
+        print_pb_list_as_table('Software Images:', device.images.image,
+                               omit_fields, self.poutput, show_nulls=True)
+
+    @options([
+        make_option('-u', '--url', action='store', dest='url',
+                    help="URL to get sw image"),
+        make_option('-n', '--name', action='store', dest='name',
+                    help="Image name"),
+        make_option('-c', '--crc', action='store', dest='crc',
+                    help="CRC code to verify with", default=0),
+        make_option('-v', '--version', action='store', dest='version',
+                    help="Image version", default=0),
+    ])
+    def do_img_dnld_request(self, line, opts):
+        """
+        Request image download to a device
+        """
+        device = self.get_device(depth=-1)
+        self.poutput('device_id {}'.format(device.id))
+        self.poutput('name {}'.format(opts.name))
+        self.poutput('url {}'.format(opts.url))
+        self.poutput('crc {}'.format(opts.crc))
+        self.poutput('version {}'.format(opts.version))
+        try:
+            device_id = device.id
+            if device_id and opts.name and opts.url:
+                kw = dict(id=device_id)
+                kw['name'] = opts.name
+                kw['url'] = opts.url
+            else:
+                self.poutput('Device ID, Image Name, and URL are needed')
+                raise Exception('Device ID, Image Name, and URL are needed')
+        except Exception as e:
+            self.poutput('Error request img dnld {}.  Error:{}'.format(device_id, e))
+            return
+        kw['crc'] = long(opts.crc)
+        kw['image_version'] = opts.version
+        response = None
+        try:
+            request = voltha_pb2.ImageDownload(**kw)
+            stub = self.get_stub()
+            response = stub.DownloadImage(request)
+        except Exception as e:
+            self.poutput('Error download image {}. Error:{}'.format(kw['id'], e))
+            return
+        name = enum2name(common_pb2.OperationResp,
+                         'OperationReturnCode', response.code)
+        self.poutput('response: {}'.format(name))
+        self.poutput('{}'.format(response))
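+    # Illustrative invocation (URL and image name are hypothetical):
+    #   img_dnld_request -n sw-1.0.img -u http://images.example.com/sw-1.0.img -v 1.0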
+
+    @options([
+        make_option('-n', '--name', action='store', dest='name',
+                    help="Image name"),
+    ])
+    def do_img_dnld_status(self, line, opts):
+        """
+        Get an image download status
+        """
+        device = self.get_device(depth=-1)
+        self.poutput('device_id {}'.format(device.id))
+        self.poutput('name {}'.format(opts.name))
+        try:
+            device_id = device.id
+            if device_id and opts.name:
+                kw = dict(id=device_id)
+                kw['name'] = opts.name
+            else:
+                self.poutput('Device ID and Image Name are needed')
+                raise Exception('Device ID and Image Name are needed')
+        except Exception as e:
+            self.poutput('Error get img dnld status {}.  Error:{}'.format(device_id, e))
+            return
+        status = None
+        try:
+            img_dnld = voltha_pb2.ImageDownload(**kw)
+            stub = self.get_stub()
+            status = stub.GetImageDownloadStatus(img_dnld)
+        except Exception as e:
+            self.poutput('Error get img dnld status {}. Error:{}'.format(device_id, e))
+            return
+        fields_to_omit = {
+              'crc',
+              'local_dir',
+        }
+        try:
+            print_pb_as_table('ImageDownload Status:', status, fields_to_omit, self.poutput)
+        except Exception as e:
+            self.poutput('Error {}.  Error:{}'.format(device_id, e))
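+    # Illustrative invocation (the image name is hypothetical):
+    #   img_dnld_status -n sw-1.0.img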
+
+    def do_img_dnld_list(self, line):
+        """
+        List all image download records for a given device
+        """
+        device = self.get_device(depth=-1)
+        device_id = device.id
+        self.poutput('Get all img dnld records {}'.format(device_id))
+        try:
+            stub = self.get_stub()
+            img_dnlds = stub.ListImageDownloads(voltha_pb2.ID(id=device_id))
+        except Exception as e:
+            self.poutput('Error list img dnlds {}.  Error:{}'.format(device_id, e))
+            return
+        fields_to_omit = {
+              'crc',
+              'local_dir',
+        }
+        try:
+            print_pb_list_as_table('ImageDownloads:', img_dnlds.items, fields_to_omit, self.poutput)
+        except Exception as e:
+            self.poutput('Error {}.  Error:{}'.format(device_id, e))
+
+
+    @options([
+        make_option('-n', '--name', action='store', dest='name',
+                    help="Image name"),
+    ])
+    def do_img_dnld_cancel(self, line, opts):
+        """
+        Cancel a requested image download
+        """
+        device = self.get_device(depth=-1)
+        self.poutput('device_id {}'.format(device.id))
+        self.poutput('name {}'.format(opts.name))
+        device_id = device.id
+        try:
+            if device_id and opts.name:
+                kw = dict(id=device_id)
+                kw['name'] = opts.name
+            else:
+                self.poutput('Device ID, Image Name are needed')
+                raise Exception('Device ID, Image Name are needed')
+        except Exception as e:
+            self.poutput('Error cancel sw dnld {}. Error:{}'.format(device_id, e))
+            return
+        response = None
+        try:
+            img_dnld = voltha_pb2.ImageDownload(**kw)
+            stub = self.get_stub()
+            img_dnld = stub.GetImageDownload(img_dnld)
+            response = stub.CancelImageDownload(img_dnld)
+        except Exception as e:
+            self.poutput('Error cancel sw dnld {}. Error:{}'.format(device_id, e))
+            return
+        name = enum2name(common_pb2.OperationResp,
+                         'OperationReturnCode', response.code)
+        self.poutput('response: {}'.format(name))
+        self.poutput('{}'.format(response))
+
+    @options([
+        make_option('-n', '--name', action='store', dest='name',
+                    help="Image name"),
+        make_option('-s', '--save', action='store', dest='save_config',
+                    help="Save Config", default="True"),
+        make_option('-d', '--dir', action='store', dest='local_dir',
+                    help="Image on device location"),
+    ])
+    def do_img_activate(self, line, opts):
+        """
+        Activate an image update on device
+        """
+        device = self.get_device(depth=-1)
+        device_id = device.id
+        try:
+            if device_id and opts.name and opts.local_dir:
+                kw = dict(id=device_id)
+                kw['name'] = opts.name
+                kw['local_dir'] = opts.local_dir
+            else:
+                self.poutput('Device ID, Image Name, and Location are needed')
+                raise Exception('Device ID, Image Name, and Location are needed')
+        except Exception as e:
+            self.poutput('Error activate image {}. Error:{}'.format(device_id, e))
+            return
+        kw['save_config'] = json.loads(opts.save_config.lower())
+        self.poutput('activate image update {} {} {} {}'.format(
+            kw['id'], kw['name'], kw['local_dir'], kw['save_config']))
+        response = None
+        try:
+            img_dnld = voltha_pb2.ImageDownload(**kw)
+            stub = self.get_stub()
+            img_dnld = stub.GetImageDownload(img_dnld)
+            response = stub.ActivateImageUpdate(img_dnld)
+        except Exception as e:
+            self.poutput('Error activate image {}. Error:{}'.format(kw['id'], e))
+            return
+        name = enum2name(common_pb2.OperationResp,
+                         'OperationReturnCode', response.code)
+        self.poutput('response: {}'.format(name))
+        self.poutput('{}'.format(response))
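+    # Illustrative invocation (name and directory are hypothetical):
+    #   img_activate -n sw-1.0.img -d /tmp/images -s True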
+
+    @options([
+        make_option('-n', '--name', action='store', dest='name',
+                    help="Image name"),
+        make_option('-s', '--save', action='store', dest='save_config',
+                    help="Save Config", default="True"),
+        make_option('-d', '--dir', action='store', dest='local_dir',
+                    help="Image on device location"),
+    ])
+    def do_img_revert(self, line, opts):
+        """
+        Revert an image update on device
+        """
+        device = self.get_device(depth=-1)
+        device_id = device.id
+        try:
+            if device_id and opts.name and opts.local_dir:
+                kw = dict(id=device_id)
+                kw['name'] = opts.name
+                kw['local_dir'] = opts.local_dir
+            else:
+                self.poutput('Device ID, Image Name, and Location are needed')
+                raise Exception('Device ID, Image Name, and Location are needed')
+        except Exception as e:
+            self.poutput('Error revert image {}. Error:{}'.format(device_id, e))
+            return
+        kw['save_config'] = json.loads(opts.save_config.lower())
+        self.poutput('revert image update {} {} {} {}'.format(
+            kw['id'], kw['name'], kw['local_dir'], kw['save_config']))
+        response = None
+        try:
+            img_dnld = voltha_pb2.ImageDownload(**kw)
+            stub = self.get_stub()
+            img_dnld = stub.GetImageDownload(img_dnld)
+            response = stub.RevertImageUpdate(img_dnld)
+        except Exception as e:
+            self.poutput('Error revert image {}. Error:{}'.format(kw['id'], e))
+            return
+        name = enum2name(common_pb2.OperationResp,
+                         'OperationReturnCode', response.code)
+        self.poutput('response: {}'.format(name))
+        self.poutput('{}'.format(response))
diff --git a/python/cli/logical_device.py b/python/cli/logical_device.py
new file mode 100644
index 0000000..cd991c6
--- /dev/null
+++ b/python/cli/logical_device.py
@@ -0,0 +1,119 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Logical device level CLI commands
+"""
+from cmd2 import Cmd
+from simplejson import dumps
+
+from table import print_pb_as_table, print_pb_list_as_table
+from utils import pb2dict
+from utils import print_flows, print_groups
+from python.protos import third_party
+from google.protobuf.empty_pb2 import Empty
+
+_ = third_party
+from python.protos import voltha_pb2
+
+
+class LogicalDeviceCli(Cmd):
+
+    def __init__(self, logical_device_id, get_stub):
+        Cmd.__init__(self)
+        self.get_stub = get_stub
+        self.logical_device_id = logical_device_id
+        self.prompt = '(' + self.colorize(
+            self.colorize('logical device {}'.format(logical_device_id), 'red'),
+            'bold') + ') '
+
+    def cmdloop(self):
+        self._cmdloop()
+
+    def get_logical_device(self, depth=0):
+        stub = self.get_stub()
+        res = stub.GetLogicalDevice(voltha_pb2.ID(id=self.logical_device_id),
+                                    metadata=(('get-depth', str(depth)), ))
+        return res
+
+    def get_device(self, id):
+        stub = self.get_stub()
+        return stub.GetDevice(voltha_pb2.ID(id=id))
+
+    def get_devices(self):
+        stub = self.get_stub()
+        res = stub.ListDevices(Empty())
+        return res.items
+
+    do_exit = Cmd.do_quit
+
+    def do_show(self, _):
+        """Show detailed logical device information"""
+        print_pb_as_table('Logical device {}'.format(self.logical_device_id),
+                          self.get_logical_device(depth=-1))
+
+    def do_ports(self, _):
+        """Show ports of logical device"""
+        device = self.get_logical_device(depth=-1)
+        omit_fields = {
+            'ofp_port.advertised',
+            'ofp_port.peer',
+            'ofp_port.max_speed'
+        }
+        print_pb_list_as_table('Logical device ports:', device.ports,
+                               omit_fields, self.poutput)
+
+    def do_flows(self, _):
+        """Show flow table for logical device"""
+        logical_device = pb2dict(self.get_logical_device(-1))
+        print_flows(
+            'Logical Device',
+            self.logical_device_id,
+            type='n/a',
+            flows=logical_device['flows']['items'],
+            groups=logical_device['flow_groups']['items']
+        )
+
+    def do_groups(self, _):
+        """Show flow group table for logical device"""
+        logical_device = pb2dict(self.get_logical_device(-1))
+        print_groups(
+            'Logical Device',
+            self.logical_device_id,
+            type='n/a',
+            groups=logical_device['flow_groups']['items']
+        )
+
+    def do_devices(self, line):
+        """List devices that belong to this logical device"""
+        logical_device = self.get_logical_device()
+        root_device_id = logical_device.root_device_id
+        devices = [self.get_device(root_device_id)]
+        for d in self.get_devices():
+            if d.parent_id == root_device_id:
+                devices.append(d)
+        omit_fields = {
+            'adapter',
+            'vendor',
+            'model',
+            'hardware_version',
+            'software_version',
+            'firmware_version',
+            'serial_number'
+        }
+        print_pb_list_as_table('Devices:', devices, omit_fields, self.poutput)
+
diff --git a/python/cli/main.py b/python/cli/main.py
new file mode 100755
index 0000000..0348f66
--- /dev/null
+++ b/python/cli/main.py
@@ -0,0 +1,922 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import argparse
+import os
+import readline
+import sys
+from optparse import make_option
+from time import sleep, time
+
+import grpc
+import requests
+from cmd2 import Cmd, options
+from consul import Consul
+from google.protobuf.empty_pb2 import Empty
+from simplejson import dumps
+
+from device import DeviceCli
+from omci import OmciCli
+from alarm_filters import AlarmFiltersCli
+from logical_device import LogicalDeviceCli
+from table import print_pb_list_as_table
+from python.common.openflow.utils import *
+from python.protos import third_party
+from python.protos import voltha_pb2
+from python.protos.openflow_13_pb2 import FlowTableUpdate, FlowGroupTableUpdate
+
+_ = third_party
+from python.cli.utils import pb2dict
+
+defs = dict(
+    # config=os.environ.get('CONFIG', './cli.yml'),
+    consul=os.environ.get('CONSUL', 'localhost:8500'),
+    voltha_grpc_endpoint=os.environ.get('VOLTHA_GRPC_ENDPOINT',
+                                        'localhost:50057'),
+    voltha_sim_rest_endpoint=os.environ.get('VOLTHA_SIM_REST_ENDPOINT',
+                                            'localhost:18880'),
+    global_request=os.environ.get('GLOBAL_REQUEST', False)
+)
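+# The defaults above can be overridden from the environment before launching
+# the CLI, e.g. (endpoint value is illustrative):
+#   export VOLTHA_GRPC_ENDPOINT=voltha-core:50057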
+
+banner = """\
+         _ _   _            ___ _    ___
+__ _____| | |_| |_  __ _   / __| |  |_ _|
+\ V / _ \ |  _| ' \/ _` | | (__| |__ | |
+ \_/\___/_|\__|_||_\__,_|  \___|____|___|
+(to exit type quit or hit Ctrl-D)
+"""
+
+
+class VolthaCli(Cmd):
+    prompt = 'voltha'
+    history_file_name = '.voltha_cli_history'
+
+    # Settable CLI parameters
+    voltha_grpc = 'localhost:50057'
+    voltha_sim_rest = 'localhost:18880'
+    global_request = False
+    max_history_lines = 500
+    default_device_id = None
+    default_logical_device_id = None
+
+    Cmd.settable.update(dict(
+        voltha_grpc='Voltha GRPC endpoint in form of <host>:<port>',
+        voltha_sim_rest='Voltha simulation back door for testing in form '
+                        'of <host>:<port>',
+        max_history_lines='Maximum number of history lines stored across '
+                          'sessions',
+        default_device_id='Device id used when no device id is specified',
+        default_logical_device_id='Logical device id used when no device id '
+                                  'is specified',
+    ))
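+    # These parameters can be changed at runtime with cmd2's built-in "set"
+    # command, e.g. (the device id below is illustrative):
+    #   (voltha) set default_device_id 0001941bd45e71f8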
+
+    # cleanup of superfluous commands from cmd2
+    del Cmd.do_cmdenvironment
+    del Cmd.do_load
+    del Cmd.do__relative_load
+
+    def __init__(self, voltha_grpc, voltha_sim_rest, global_request=False):
+
+        VolthaCli.voltha_grpc = "localhost:50057"
+        # VolthaCli.voltha_grpc = voltha_grpc
+        VolthaCli.voltha_sim_rest = voltha_sim_rest
+        VolthaCli.global_request = global_request
+        Cmd.__init__(self)
+        self.prompt = '(' + self.colorize(
+            self.colorize(self.prompt, 'blue'), 'bold') + ') '
+        self.channel = None
+        self.stub = None
+        self.device_ids_cache = None
+        self.device_ids_cache_ts = time()
+        self.logical_device_ids_cache = None
+        self.logical_device_ids_cache_ts = time()
+
+    # we override cmd2's method to avoid its optparse conflicting with our
+    # command line parsing
+    def cmdloop(self):
+        self._cmdloop()
+
+    def load_history(self):
+        """Load saved command history from local history file"""
+        try:
+            with open(self.history_file_name, 'r') as f:
+                for line in f.readlines():
+                    stripped_line = line.strip()
+                    self.history.append(stripped_line)
+                    readline.add_history(stripped_line)
+        except IOError:
+            pass  # ignore if file cannot be read
+
+    def save_history(self):
+        try:
+            with open(self.history_file_name, 'w') as f:
+                f.write('\n'.join(self.history[-self.max_history_lines:]))
+        except IOError as e:
+            self.perror('Could not save history in {}: {}'.format(
+                self.history_file_name, e))
+        else:
+            self.poutput('History saved as {}'.format(
+                self.history_file_name))
+
+    def perror(self, errmsg, statement=None):
+        # Touch it up to make sure error is prefixed and colored
+        Cmd.perror(self, self.colorize('***ERROR: ', 'red') + errmsg,
+                   statement)
+
+    def get_channel(self):
+        if self.channel is None:
+            self.channel = grpc.insecure_channel(self.voltha_grpc)
+        return self.channel
+
+    def get_stub(self):
+        if self.stub is None:
+            self.stub = voltha_pb2.VolthaServiceStub(self.get_channel())
+            # self.stub = \
+            #     voltha_pb2.VolthaGlobalServiceStub(self.get_channel()) \
+            #         if self.global_request else \
+            #             voltha_pb2.VolthaLocalServiceStub(self.get_channel())
+        return self.stub
+
+    # ~~~~~~~~~~~~~~~~~ ACTUAL COMMAND IMPLEMENTATIONS ~~~~~~~~~~~~~~~~~~~~~~~~
+
+    def do_reset_history(self, line):
+        """Reset CLI history"""
+        while self.history:
+            self.history.pop()
+
+    def do_launch(self, line):
+        """If Voltha is not running yet, launch it"""
+        raise NotImplementedError('not implemented yet')
+
+    def do_restart(self, line):
+        """Launch Voltha, but if it is already running, terminate it first"""
+        pass
+
+    def do_adapters(self, line):
+        """List loaded adapter"""
+        stub = self.get_stub()
+        res = stub.ListAdapters(Empty())
+        omit_fields = {'config.log_level', 'logical_device_ids'}
+        print_pb_list_as_table('Adapters:', res.items, omit_fields, self.poutput)
+
+    def get_devices(self):
+        stub = self.get_stub()
+        res = stub.ListDevices(Empty())
+        return res.items
+
+    def get_logical_devices(self):
+        stub = self.get_stub()
+        res = stub.ListLogicalDevices(Empty())
+        return res.items
+
+    def do_devices(self, line):
+        """List devices registered in Voltha"""
+        devices = self.get_devices()
+        omit_fields = {
+            'adapter',
+            'vendor',
+            'model',
+            'hardware_version',
+            'images',
+            'firmware_version',
+            'vendor_id'
+        }
+        print_pb_list_as_table('Devices:', devices, omit_fields, self.poutput)
+
+    def do_logical_devices(self, line):
+        """List logical devices in Voltha"""
+        stub = self.get_stub()
+        res = stub.ListLogicalDevices(Empty())
+        omit_fields = {
+            'desc.mfr_desc',
+            'desc.hw_desc',
+            'desc.sw_desc',
+            'desc.dp_desc',
+            'desc.serial_number',
+            'switch_features.capabilities'
+        }
+        presfns = {
+            'datapath_id': lambda x: "{0:0{1}x}".format(int(x), 16)
+        }
+        print_pb_list_as_table('Logical devices:', res.items, omit_fields,
+                               self.poutput, presfns=presfns)
+
+    def do_device(self, line):
+        """Enter device level command mode"""
+        device_id = line.strip() or self.default_device_id
+        if not device_id:
+            raise Exception('<device-id> parameter needed')
+        if device_id not in self.device_ids():
+            self.poutput(self.colorize('Error: ', 'red') +
+                         'There is no such device')
+            raise Exception('<device-id> is not a valid one')
+        sub = DeviceCli(device_id, self.get_stub)
+        sub.cmdloop()
+
+    def do_logical_device(self, line):
+        """Enter logical device level command mode"""
+        logical_device_id = line.strip() or self.default_logical_device_id
+        if not logical_device_id:
+            raise Exception('<logical-device-id> parameter needed')
+        if logical_device_id not in self.logical_device_ids():
+            self.poutput(self.colorize('Error: ', 'red') +
+                         'There is no such device')
+            raise Exception('<logical-device-id> is not a valid one')
+        sub = LogicalDeviceCli(logical_device_id, self.get_stub)
+        sub.cmdloop()
+
+    def device_ids(self, force_refresh=False):
+        if force_refresh or self.device_ids_cache is None or \
+                (time() - self.device_ids_cache_ts) > 1:
+            self.device_ids_cache = [d.id for d in self.get_devices()]
+            self.device_ids_cache_ts = time()
+        return self.device_ids_cache
+
+    def logical_device_ids(self, force_refresh=False):
+        if force_refresh or self.logical_device_ids_cache is None or \
+                (time() - self.logical_device_ids_cache_ts) > 1:
+            self.logical_device_ids_cache = [d.id for d
+                                             in self.get_logical_devices()]
+            self.logical_device_ids_cache_ts = time()
+        return self.logical_device_ids_cache
+
+    def complete_device(self, text, line, begidx, endidx):
+        if not text:
+            completions = self.device_ids()[:]
+        else:
+            completions = [d for d in self.device_ids() if d.startswith(text)]
+        return completions
+
+    def complete_logical_device(self, text, line, begidx, endidx):
+        if not text:
+            completions = self.logical_device_ids()[:]
+        else:
+            completions = [d for d in self.logical_device_ids()
+                           if d.startswith(text)]
+        return completions
+
+    def do_xpon(self, line):
+        """xpon <optional> [device_ID] - Enter xpon level command mode"""
+        device_id = line.strip()
+        if device_id:
+            stub = self.get_stub()
+            try:
+                stub.GetDevice(voltha_pb2.ID(id=device_id))
+            except Exception:
+                self.poutput(
+                    self.colorize('Error: ', 'red') + 'No device id ' +
+                    self.colorize(device_id, 'blue') + ' is found')
+                return
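+        # NOTE: XponCli is not imported at the top of this module; this
+        # command assumes it is available on the import path.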
+        sub = XponCli(self.get_channel, device_id)
+        sub.cmdloop()
+
+    def do_omci(self, line):
+        """omci <device_ID> - Enter OMCI level command mode"""
+
+        device_id = line.strip() or self.default_device_id
+        if not device_id:
+            raise Exception('<device-id> parameter needed')
+        sub = OmciCli(device_id, self.get_stub)
+        sub.cmdloop()
+
+    def do_pdb(self, line):
+        """Launch PDB debug prompt in CLI (for CLI development)"""
+        from pdb import set_trace
+        set_trace()
+
+    def do_version(self, line):
+        """Show the VOLTHA core version"""
+        stub = self.get_stub()
+        voltha = stub.GetVoltha(Empty())
+        self.poutput('{}'.format(voltha.version))
+
+    def do_health(self, line):
+        """Show connectivity status to Voltha status"""
+        stub = voltha_pb2.HealthServiceStub(self.get_channel())
+        res = stub.GetHealthStatus(Empty())
+        self.poutput(dumps(pb2dict(res), indent=4))
+
+    @options([
+        make_option('-t', '--device-type', action="store", dest='device_type',
+                    help="Device type", default='simulated_olt'),
+        make_option('-m', '--mac-address', action='store', dest='mac_address',
+                    default='00:0c:e2:31:40:00'),
+        make_option('-i', '--ip-address', action='store', dest='ip_address'),
+        make_option('-H', '--host_and_port', action='store',
+                    dest='host_and_port'),
+    ])
+    def do_preprovision_olt(self, line, opts):
+        """Preprovision a new OLT with given device type"""
+        stub = self.get_stub()
+        kw = dict(type=opts.device_type)
+        if opts.host_and_port:
+            kw['host_and_port'] = opts.host_and_port
+        elif opts.ip_address:
+            kw['ipv4_address'] = opts.ip_address
+        elif opts.mac_address:
+            kw['mac_address'] = opts.mac_address.lower()
+        else:
+            raise Exception('Either IP address or MAC address is needed')
+        # Pass any extra arguments past '--' to the device as custom arguments
+        kw['extra_args'] = line
+
+        device = voltha_pb2.Device(**kw)
+        device = stub.CreateDevice(device)
+        self.poutput('success (device id = {})'.format(device.id))
+        self.default_device_id = device.id
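+    # Illustrative invocation (the host:port value is hypothetical):
+    #   preprovision_olt -t ponsim_olt -H 172.17.0.1:50060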
+
+    def do_enable(self, line):
+        """
+        Enable a device. If <id> is not provided, the command acts on the
+        last pre-provisioned device.
+        """
+        device_id = line or self.default_device_id
+        if device_id not in self.device_ids():
+            self.poutput('Error: There is no such preprovisioned device')
+            return
+
+        try:
+            stub = self.get_stub()
+            device = stub.GetDevice(voltha_pb2.ID(id=device_id))
+            if device.admin_state == voltha_pb2.AdminState.ENABLED:
+                if device.oper_status != voltha_pb2.OperStatus.ACTIVATING:
+                    self.poutput('Error: Device is already enabled')
+                    return
+            else:
+                stub.EnableDevice(voltha_pb2.ID(id=device_id))
+                self.poutput('enabling {}'.format(device_id))
+
+            while True:
+                device = stub.GetDevice(voltha_pb2.ID(id=device_id))
+                # If this is an OLT then acquire logical device id
+                if device.oper_status == voltha_pb2.OperStatus.ACTIVE:
+                    if device.type.endswith('_olt'):
+                        assert device.parent_id
+                        self.default_logical_device_id = device.parent_id
+                        self.poutput('success (logical device id = {})'.format(
+                            self.default_logical_device_id))
+                    else:
+                        self.poutput('success (device id = {})'.format(device.id))
+                    break
+                self.poutput('waiting for device to be enabled...')
+                sleep(.5)
+        except Exception as e:
+            self.poutput('Error enabling {}.  Error:{}'.format(device_id, e))
+
+    complete_activate_olt = complete_device
+
+    def do_reboot(self, line):
+        """
+        Reboot a device. ID of the device needs to be provided
+        """
+        device_id = line or self.default_device_id
+        self.poutput('rebooting {}'.format(device_id))
+        try:
+            stub = self.get_stub()
+            stub.RebootDevice(voltha_pb2.ID(id=device_id))
+            self.poutput('rebooted {}'.format(device_id))
+        except Exception as e:
+            self.poutput('Error rebooting {}.  Error:{}'.format(device_id, e))
+
+    def do_self_test(self, line):
+        """
+        Self Test a device. ID of the device needs to be provided
+        """
+        device_id = line or self.default_device_id
+        self.poutput('Self Testing {}'.format(device_id))
+        try:
+            stub = self.get_stub()
+            res = stub.SelfTest(voltha_pb2.ID(id=device_id))
+            self.poutput('Self Tested {}'.format(device_id))
+            self.poutput(dumps(pb2dict(res), indent=4))
+        except Exception as e:
+            self.poutput('Error in self test {}.  Error:{}'.format(device_id, e))
+
+    def do_delete(self, line):
+        """
+        Delete a device. ID of the device needs to be provided
+        """
+        device_id = line or self.default_device_id
+        self.poutput('deleting {}'.format(device_id))
+        try:
+            stub = self.get_stub()
+            stub.DeleteDevice(voltha_pb2.ID(id=device_id))
+            self.poutput('deleted {}'.format(device_id))
+        except Exception as e:
+            self.poutput('Error deleting {}.  Error:{}'.format(device_id, e))
+
+    def do_disable(self, line):
+        """
+        Disable a device. ID of the device needs to be provided
+        """
+        device_id = line
+        if device_id not in self.device_ids():
+            self.poutput('Error: There is no such device')
+            return
+        try:
+            stub = self.get_stub()
+            device = stub.GetDevice(voltha_pb2.ID(id=device_id))
+            if device.admin_state == voltha_pb2.AdminState.DISABLED:
+                self.poutput('Error: Device is already disabled')
+                return
+            stub.DisableDevice(voltha_pb2.ID(id=device_id))
+            self.poutput('disabling {}'.format(device_id))
+
+            # Do device query and verify that the device admin status is
+            # DISABLED and Operational Status is unknown
+            device = stub.GetDevice(voltha_pb2.ID(id=device_id))
+            if device.admin_state == voltha_pb2.AdminState.DISABLED:
+                self.poutput('disabled successfully {}'.format(device_id))
+            else:
+                self.poutput('disabling failed {}.  Admin State:{} '
+                             'Operation State: {}'.format(device_id,
+                                                          device.admin_state,
+                                                          device.oper_status))
+        except Exception as e:
+            self.poutput('Error disabling {}.  Error:{}'.format(device_id, e))
+
+    def do_test(self, line):
+        """Enter test mode, which makes a bunch on new commands available"""
+        sub = TestCli(self.history, self.voltha_grpc,
+                      self.get_stub, self.voltha_sim_rest)
+        sub.cmdloop()
+
+    def do_alarm_filters(self, line):
+        sub = AlarmFiltersCli(self.get_stub)
+        sub.cmdloop()
+
+
+class TestCli(VolthaCli):
+    def __init__(self, history, voltha_grpc, get_stub, voltha_sim_rest):
+        VolthaCli.__init__(self, voltha_grpc, voltha_sim_rest)
+        self.history = history
+        self.get_stub = get_stub
+        self.prompt = '(' + self.colorize(self.colorize('test', 'cyan'),
+                                          'bold') + ') '
+
+    def get_device(self, device_id, depth=0):
+        stub = self.get_stub()
+        res = stub.GetDevice(voltha_pb2.ID(id=device_id),
+                             metadata=(('get-depth', str(depth)),))
+        return res
+
+    def do_arrive_onus(self, line):
+        """
+        Simulate the arrival of ONUs (available only on simulated_olt)
+        """
+        device_id = line or self.default_device_id
+
+        # verify that device is of type simulated_olt
+        device = self.get_device(device_id)
+        assert device.type == 'simulated_olt', (
+            'Cannot use it on this device type (only on simulated_olt type)')
+
+        requests.get('http://{}/devices/{}/detect_onus'.format(
+            self.voltha_sim_rest, device_id
+        ))
+
+    complete_arrive_onus = VolthaCli.complete_device
+
+    def get_logical_ports(self, logical_device_id):
+        """
+        Return the NNI port number of the logical device, plus the list of
+        (UNI port number, vlan) pairs for all of its UNI ports.
+        """
+        stub = self.get_stub()
+        ports = stub.ListLogicalDevicePorts(
+            voltha_pb2.ID(id=logical_device_id)).items
+        nni = None
+        unis = []
+        for port in ports:
+            if port.root_port:
+                assert nni is None, "There shall be only one root port"
+                nni = port.ofp_port.port_no
+            else:
+                uni = port.ofp_port.port_no
+                uni_device = self.get_device(port.device_id)
+                vlan = uni_device.vlan
+                unis.append((uni, vlan))
+
+        assert nni is not None, "No NNI port found"
+        assert unis, "Not a single UNI?"
+
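+        # e.g. returns (2, [(128, 101), (129, 102)]): the NNI port number and
+        # a list of (uni_port_no, c_vid) pairs; values here are illustrative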
+        return nni, unis
+
+    def do_install_eapol_flow(self, line):
+        """
+        Install an EAPOL flow on the given logical device. If device is not
+        given, it will be applied to logical device of the last pre-provisioned
+        OLT device.
+        """
+
+        logical_device_id = line or self.default_logical_device_id
+
+        # gather NNI and UNI port IDs
+        nni_port_no, unis = self.get_logical_ports(logical_device_id)
+
+        # construct and push flow rule
+        stub = self.get_stub()
+        print "I am now here", unis
+        for uni_port_no, _ in unis:
+            update = FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=2000,
+                    match_fields=[in_port(uni_port_no), eth_type(0x888e)],
+                    actions=[
+                        # push_vlan(0x8100),
+                        # set_field(vlan_vid(4096 + 4000)),
+                        output(ofp.OFPP_CONTROLLER)
+                    ]
+                )
+            )
+            print "I am now here"
+            res = stub.UpdateLogicalDeviceFlowTable(update)
+            self.poutput('success for uni {} ({})'.format(uni_port_no, res))
+
+    complete_install_eapol_flow = VolthaCli.complete_logical_device
+
+    def do_install_all_controller_bound_flows(self, line):
+        """
+        Install all flow rules for controller bound flows, including EAPOL,
+        IGMP and DHCP. If device is not given, it will be applied to logical
+        device of the last pre-provisioned OLT device.
+        """
+        logical_device_id = line or self.default_logical_device_id
+
+        # gather NNI and UNI port IDs
+        nni_port_no, unis = self.get_logical_ports(logical_device_id)
+
+        # construct and push flow rules
+        stub = self.get_stub()
+
+        for uni_port_no, _ in unis:
+            stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=2000,
+                    match_fields=[
+                        in_port(uni_port_no),
+                        eth_type(0x888e)
+                    ],
+                    actions=[output(ofp.OFPP_CONTROLLER)]
+                )
+            ))
+            stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=1000,
+                    match_fields=[
+                        in_port(uni_port_no),
+                        eth_type(0x800),
+                        ip_proto(2)
+                    ],
+                    actions=[output(ofp.OFPP_CONTROLLER)]
+                )
+            ))
+            stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=1000,
+                    match_fields=[
+                        in_port(uni_port_no),
+                        eth_type(0x800),
+                        ip_proto(17),
+                        udp_dst(67)
+                    ],
+                    actions=[output(ofp.OFPP_CONTROLLER)]
+                )
+            ))
+        self.poutput('success')
+
+    complete_install_all_controller_bound_flows = \
+        VolthaCli.complete_logical_device
+
+    def do_install_all_sample_flows(self, line):
+        """
+        Install all flows that are representative of the virtualized access
+        scenario in a PON network.
+        """
+        logical_device_id = line or self.default_logical_device_id
+
+        # gather NNI and UNI port IDs
+        nni_port_no, unis = self.get_logical_ports(logical_device_id)
+
+        # construct and push flow rules
+        stub = self.get_stub()
+
+        for uni_port_no, c_vid in unis:
+            # Controller-bound flows
+            stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=2000,
+                    match_fields=[in_port(uni_port_no), eth_type(0x888e)],
+                    actions=[
+                        # push_vlan(0x8100),
+                        # set_field(vlan_vid(4096 + 4000)),
+                        output(ofp.OFPP_CONTROLLER)
+                    ]
+                )
+            ))
+            stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=1000,
+                    match_fields=[eth_type(0x800), ip_proto(2)],
+                    actions=[output(ofp.OFPP_CONTROLLER)]
+                )
+            ))
+            stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=1000,
+                    match_fields=[eth_type(0x800), ip_proto(17), udp_dst(67)],
+                    actions=[output(ofp.OFPP_CONTROLLER)]
+                )
+            ))
+
+            # Unicast flows:
+            # Downstream flow 1
+            stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=500,
+                    match_fields=[
+                        in_port(nni_port_no),
+                        vlan_vid(4096 + 1000),
+                        metadata(c_vid)  # here to mimic an ONOS artifact
+                    ],
+                    actions=[pop_vlan()],
+                    next_table_id=1
+                )
+            ))
+            # Downstream flow 2
+            stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=500,
+                    table_id=1,
+                    match_fields=[in_port(nni_port_no), vlan_vid(4096 + c_vid)],
+                    actions=[set_field(vlan_vid(4096 + 0)), output(uni_port_no)]
+                )
+            ))
+            # Upstream flow 1 for 0-tagged case
+            stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=500,
+                    match_fields=[in_port(uni_port_no), vlan_vid(4096 + 0)],
+                    actions=[set_field(vlan_vid(4096 + c_vid))],
+                    next_table_id=1
+                )
+            ))
+            # Upstream flow 1 for untagged case
+            stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=500,
+                    match_fields=[in_port(uni_port_no), vlan_vid(0)],
+                    actions=[push_vlan(0x8100), set_field(vlan_vid(4096 + c_vid))],
+                    next_table_id=1
+                )
+            ))
+            # Upstream flow 2 for s-tag
+            stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=500,
+                    table_id=1,
+                    match_fields=[in_port(uni_port_no), vlan_vid(4096 + c_vid)],
+                    actions=[
+                        push_vlan(0x8100),
+                        set_field(vlan_vid(4096 + 1000)),
+                        output(nni_port_no)
+                    ]
+                )
+            ))
+
+        # Push a few multicast flows
+        # 1st with one bucket for our uni 0
+        stub.UpdateLogicalDeviceFlowGroupTable(FlowGroupTableUpdate(
+            id=logical_device_id,
+            group_mod=mk_multicast_group_mod(
+                group_id=1,
+                buckets=[
+                    ofp.ofp_bucket(actions=[
+                        pop_vlan(),
+                        output(unis[0][0])
+                    ])
+                ]
+            )
+        ))
+        stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+            id=logical_device_id,
+            flow_mod=mk_simple_flow_mod(
+                priority=1000,
+                match_fields=[
+                    in_port(nni_port_no),
+                    eth_type(0x800),
+                    vlan_vid(4096 + 140),
+                    ipv4_dst(0xe4010101)
+                ],
+                actions=[group(1)]
+            )
+        ))
+
+        # 2nd group: bucket for uni 0 (a second bucket for uni 1 is commented out)
+        stub.UpdateLogicalDeviceFlowGroupTable(FlowGroupTableUpdate(
+            id=logical_device_id,
+            group_mod=mk_multicast_group_mod(
+                group_id=2,
+                buckets=[
+                    ofp.ofp_bucket(actions=[pop_vlan(), output(unis[0][0])])
+                    # ofp.ofp_bucket(actions=[pop_vlan(), output(unis[1][0])])
+                ]
+            )
+        ))
+        stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+            id=logical_device_id,
+            flow_mod=mk_simple_flow_mod(
+                priority=1000,
+                match_fields=[
+                    in_port(nni_port_no),
+                    eth_type(0x800),
+                    vlan_vid(4096 + 140),
+                    ipv4_dst(0xe4020202)
+                ],
+                actions=[group(2)]
+            )
+        ))
+
+        # 3rd with empty bucket
+        stub.UpdateLogicalDeviceFlowGroupTable(FlowGroupTableUpdate(
+            id=logical_device_id,
+            group_mod=mk_multicast_group_mod(
+                group_id=3,
+                buckets=[]
+            )
+        ))
+        stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+            id=logical_device_id,
+            flow_mod=mk_simple_flow_mod(
+                priority=1000,
+                match_fields=[
+                    in_port(nni_port_no),
+                    eth_type(0x800),
+                    vlan_vid(4096 + 140),
+                    ipv4_dst(0xe4030303)
+                ],
+                actions=[group(3)]
+            )
+        ))
+
+        self.poutput('success')
+
+    complete_install_all_sample_flows = VolthaCli.complete_logical_device
+
+    def do_install_dhcp_flows(self, line):
+        """
+        Install all dhcp flows that are representative of the virtualized access
+        scenario in a PON network.
+        """
+        logical_device_id = line or self.default_logical_device_id
+
+        # gather NNI and UNI port IDs
+        nni_port_no, unis = self.get_logical_ports(logical_device_id)
+
+        # construct and push flow rules
+        stub = self.get_stub()
+
+        # Controller-bound flows
+        for uni_port_no, _ in unis:
+            stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+                id=logical_device_id,
+                flow_mod=mk_simple_flow_mod(
+                    priority=1000,
+                    match_fields=[
+                        in_port(uni_port_no),
+                        eth_type(0x800),
+                        ip_proto(17),
+                        udp_dst(67)
+                    ],
+                    actions=[output(ofp.OFPP_CONTROLLER)]
+                )
+            ))
+
+        self.poutput('success')
+
+    complete_install_dhcp_flows = VolthaCli.complete_logical_device
+
+    def do_delete_all_flows(self, line):
+        """
+        Remove all flows and flow groups from given logical device
+        """
+        logical_device_id = line or self.default_logical_device_id
+        stub = self.get_stub()
+        stub.UpdateLogicalDeviceFlowTable(FlowTableUpdate(
+            id=logical_device_id,
+            flow_mod=ofp.ofp_flow_mod(
+                command=ofp.OFPFC_DELETE,
+                table_id=ofp.OFPTT_ALL,
+                cookie_mask=0,
+                out_port=ofp.OFPP_ANY,
+                out_group=ofp.OFPG_ANY
+            )
+        ))
+        stub.UpdateLogicalDeviceFlowGroupTable(FlowGroupTableUpdate(
+            id=logical_device_id,
+            group_mod=ofp.ofp_group_mod(
+                command=ofp.OFPGC_DELETE,
+                group_id=ofp.OFPG_ALL
+            )
+        ))
+        self.poutput('success')
+
+    complete_delete_all_flows = VolthaCli.complete_logical_device
+
+    def do_send_simulated_upstream_eapol(self, line):
+        """
+        Send an EAPOL upstream from a simulated OLT
+        """
+        device_id = line or self.default_device_id
+        requests.get('http://{}/devices/{}/test_eapol_in'.format(
+            self.voltha_sim_rest, device_id
+        ))
+
+    complete_send_simulated_upstream_eapol = VolthaCli.complete_device
+
+    def do_inject_eapol_start(self, line):
+        """
+        Send out an EAPOL start message on the given Unix interface
+        """
+        pass
+
+
+if __name__ == '__main__':
+
+    parser = argparse.ArgumentParser()
+
+    _help = '<hostname>:<port> of the consul agent (default: %s)' % defs['consul']
+    parser.add_argument(
+        '-C', '--consul', action='store', default=defs['consul'], help=_help)
+
+    _help = 'Lookup Voltha endpoints based on service entries in Consul'
+    parser.add_argument(
+        '-L', '--lookup', action='store_true', help=_help)
+
+    _help = 'All requests to the Voltha gRPC service are global'
+    parser.add_argument(
+        '-G', '--global_request', action='store_true', help=_help)
+
+    _help = '<hostname>:<port> of Voltha gRPC service (default={})'.format(
+        defs['voltha_grpc_endpoint'])
+    parser.add_argument('-g', '--grpc-endpoint', action='store',
+                        default=defs['voltha_grpc_endpoint'], help=_help)
+
+    _help = '<hostname>:<port> of Voltha simulated adapter backend for ' \
+            'testing (default={})'.format(
+        defs['voltha_sim_rest_endpoint'])
+    parser.add_argument('-s', '--sim-rest-endpoint', action='store',
+                        default=defs['voltha_sim_rest_endpoint'], help=_help)
+
+    args = parser.parse_args()
+
+    if args.lookup:
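+        # Resolve the Voltha endpoints from the Consul service catalog,
+        # overriding any endpoints supplied on the command line.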
+        host = args.consul.split(':')[0].strip()
+        port = int(args.consul.split(':')[1].strip())
+        consul = Consul(host=host, port=port)
+
+        _, services = consul.catalog.service('voltha-grpc')
+        if not services:
+            print('No voltha-grpc service registered in consul; exiting')
+            sys.exit(1)
+        args.grpc_endpoint = '{}:{}'.format(services[0]['ServiceAddress'],
+                                            services[0]['ServicePort'])
+
+        _, services = consul.catalog.service('voltha-sim-rest')
+        if not services:
+            print('No voltha-sim-rest service registered in consul; exiting')
+            sys.exit(1)
+        args.sim_rest_endpoint = '{}:{}'.format(services[0]['ServiceAddress'],
+                                                services[0]['ServicePort'])
+
+    c = VolthaCli(args.grpc_endpoint, args.sim_rest_endpoint,
+                  args.global_request)
+    c.poutput(banner)
+    c.load_history()
+    c.cmdloop()
+    c.save_history()
diff --git a/python/cli/omci.py b/python/cli/omci.py
new file mode 100644
index 0000000..d8b8334
--- /dev/null
+++ b/python/cli/omci.py
@@ -0,0 +1,356 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+OpenOMCI level CLI commands
+"""
+from optparse import make_option
+from cmd2 import Cmd, options
+from datetime import datetime
+from google.protobuf.empty_pb2 import Empty
+from table import print_pb_list_as_table
+from python.protos import third_party
+from python.protos import voltha_pb2
+from python.protos.omci_mib_db_pb2 import MibDeviceData, MibClassData, \
+    MibInstanceData
+from os import linesep
+
+_ = third_party
+
+
+class OmciCli(Cmd):
+    CREATED_KEY = 'created'
+    MODIFIED_KEY = 'modified'
+    MDS_KEY = 'mib_data_sync'
+    LAST_SYNC_KEY = 'last_mib_sync'
+    VERSION_KEY = 'version'
+    DEVICE_ID_KEY = 'device_id'
+    CLASS_ID_KEY = 'class_id'
+    INSTANCE_ID_KEY = 'instance_id'
+    ATTRIBUTES_KEY = 'attributes'
+    TIME_FORMAT = '%Y%m%d-%H%M%S.%f'
+    ME_KEY = 'managed_entities'
+    MSG_TYPE_KEY = 'message_types'
+
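+    # OMCI message type codes (ITU-T G.988) mapped to display names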
+    MSG_TYPE_TO_NAME = {
+        4: 'Create',
+        5: 'Create Complete',
+        6: 'Delete',
+        8: 'Set',
+        9: 'Get',
+        10: 'Get Complete',
+        11: 'Get All Alarms',
+        12: 'Get All Alarms Next',
+        13: 'Mib Upload',
+        14: 'Mib Upload Next',
+        15: 'Mib Reset',
+        16: 'Alarm Notification',
+        17: 'Attribute Value Change',
+        18: 'Test',
+        19: 'Start Software Download',
+        20: 'Download Section',
+        21: 'End Software Download',
+        22: 'Activate Software',
+        23: 'Commit Software',
+        24: 'Synchronize Time',
+        25: 'Reboot',
+        26: 'Get Next',
+        27: 'Test Result',
+        28: 'Get Current Data',
+        29: 'Set Table'
+    }
+
+    def __init__(self, device_id, get_stub):
+        Cmd.__init__(self)
+        self.get_stub = get_stub
+        self.device_id = device_id
+        self.prompt = '(' + self.colorize(
+            self.colorize('omci {}'.format(device_id), 'green'),
+            'bold') + ') '
+
+    def cmdloop(self, intro=None):
+        self._cmdloop()
+
+    do_exit = Cmd.do_quit
+
+    def do_quit(self, line):
+        return self._STOP_AND_EXIT
+
+    def get_device_mib(self, device_id, depth=-1):
+        stub = self.get_stub()
+
+        # Let any gRPC error (e.g. unknown device ID) propagate; callers
+        # catch it and report the failure to the user.
+        return stub.GetMibDeviceData(voltha_pb2.ID(id=device_id),
+                                     metadata=(('get-depth', str(depth)), ))
+
+    def help_show_mib(self):
+        self.poutput('show_mib [-d <device-id>] [-c <class-id> [-i <instance-id>]]' +
+                     linesep + '-d: <device-id>   ONU Device ID' +
+                     linesep + '-c: <class-id>    Managed Entity Class ID' +
+                     linesep + '-i: <instance-id> ME Instance ID')
+
+    @options([
+        make_option('-d', '--device-id', action="store", dest='device_id', type='string',
+                    help='ONU Device ID', default=None),
+        make_option('-c', '--class-id', action="store", dest='class_id',
+                    type='int', help='Managed Entity Class ID', default=None),
+        make_option('-i', '--instance-id', action="store", dest='instance_id',
+                    type='int', help='ME Instance ID', default=None)
+    ])
+    def do_show_mib(self, _line, opts):
+        """
+        Show OMCI MIB Database Information
+        """
+        device_id = opts.device_id or self.device_id
+
+        if opts.class_id is not None and not 1 <= opts.class_id <= 0xFFFF:
+            self.poutput(self.colorize('Error: ', 'red') +
+                         self.colorize('Class ID must be 1..65535', 'blue'))
+            return
+
+        if opts.instance_id is not None and opts.class_id is None:
+            self.poutput(self.colorize('Error: ', 'red') +
+                         self.colorize('Class ID required if specifying an Instance ID',
+                                       'blue'))
+            return
+
+        if opts.instance_id is not None and not 0 <= opts.instance_id <= 0xFFFF:
+            self.poutput(self.colorize('Error: ', 'red') +
+                         self.colorize('Instance ID must be 0..65535', 'blue'))
+            return
+
+        try:
+            mib_db = self.get_device_mib(device_id, depth=-1)
+
+        except Exception:   # gRPC error if Device ID not found in DB
+            self.poutput(self.colorize('Failed to get MIB database for ONU {}'
+                                       .format(device_id), 'red'))
+            return
+
+        mib = self._device_to_dict(mib_db)
+
+        self.poutput('OpenOMCI MIB Database for ONU {}'.format(device_id))
+
+        if opts.class_id is None and opts.instance_id is None:
+            self.poutput('Version            : {}'.format(mib[OmciCli.VERSION_KEY]))
+            self.poutput('Created            : {}'.format(mib[OmciCli.CREATED_KEY]))
+            self.poutput('Last In-Sync Time  : {}'.format(mib[OmciCli.LAST_SYNC_KEY]))
+            self.poutput('MIB Data Sync Value: {}'.format(mib[OmciCli.MDS_KEY]))
+
+        class_ids = [k for k in mib.iterkeys()
+                     if isinstance(k, int) and
+                     (opts.class_id is None or opts.class_id == k)]
+        class_ids.sort()
+
+        if len(class_ids) == 0 and opts.class_id is not None:
+            self.poutput(self.colorize('Class ID {} not found in MIB Database'
+                                       .format(opts.class_id), 'red'))
+            return
+
+        for cls_id in class_ids:
+            class_data = mib[cls_id]
+            self.poutput('  ----------------------------------------------')
+            self.poutput('  Class ID: {0} - ({0:#x})'.format(cls_id))
+
+            inst_ids = [k for k in class_data.iterkeys()
+                        if isinstance(k, int) and
+                        (opts.instance_id is None or opts.instance_id == k)]
+            inst_ids.sort()
+
+            if len(inst_ids) == 0 and opts.instance_id is not None:
+                self.poutput(self.colorize('Instance ID {} of Class ID {} not '
+                                           'found in MIB Database'
+                                           .format(opts.instance_id,
+                                                   opts.class_id),
+                                           'red'))
+                return
+
+            for inst_id in inst_ids:
+                inst_data = class_data[inst_id]
+                self.poutput('    Instance ID: {0} - ({0:#x})'.format(inst_id))
+                self.poutput('    Created    : {}'.format(inst_data[OmciCli.CREATED_KEY]))
+                self.poutput('    Modified   : {}'.format(inst_data[OmciCli.MODIFIED_KEY]))
+
+                attributes = inst_data[OmciCli.ATTRIBUTES_KEY]
+                attr_names = attributes.keys()
+                attr_names.sort()
+                max_len = max([len(attr) for attr in attr_names] or [0])
+
+                for attr in attr_names:
+                    name = self._cleanup_attribute_name(attr).ljust(max_len)
+                    value = attributes[attr]
+                    try:
+                        ivalue = int(value)
+                        self.poutput('      {0}: {1} - ({1:#x})'.format(name, ivalue))
+
+                    except ValueError:
+                        self.poutput('      {}: {}'.format(name, value))
+
+                if inst_id != inst_ids[-1]:
+                    self.poutput(linesep)
+
+    def _cleanup_attribute_name(self, attr):
+        """Change underscore to space and capitalize first character"""
+        return ' '.join([v[0].upper() + v[1:] for v in attr.split('_')])
+
+    def _instance_to_dict(self, instance):
+        if not isinstance(instance, MibInstanceData):
+            raise TypeError('{} is not of type MibInstanceData'.format(type(instance)))
+
+        data = {
+            OmciCli.INSTANCE_ID_KEY: instance.instance_id,
+            OmciCli.CREATED_KEY: self._string_to_time(instance.created),
+            OmciCli.MODIFIED_KEY: self._string_to_time(instance.modified),
+            OmciCli.ATTRIBUTES_KEY: dict()
+        }
+        for attribute in instance.attributes:
+            data[OmciCli.ATTRIBUTES_KEY][attribute.name] = str(attribute.value)
+
+        return data
+
+    def _class_to_dict(self, val):
+        if not isinstance(val, MibClassData):
+            raise TypeError('{} is not of type MibClassData'.format(type(val)))
+
+        data = {
+            OmciCli.CLASS_ID_KEY: val.class_id,
+        }
+        for instance in val.instances:
+            data[instance.instance_id] = self._instance_to_dict(instance)
+        return data
+
+    def _device_to_dict(self, val):
+        if not isinstance(val, MibDeviceData):
+            raise TypeError('{} is not of type MibDeviceData'.format(type(val)))
+
+        data = {
+            OmciCli.DEVICE_ID_KEY: val.device_id,
+            OmciCli.CREATED_KEY: self._string_to_time(val.created),
+            OmciCli.LAST_SYNC_KEY: self._string_to_time(val.last_sync_time),
+            OmciCli.MDS_KEY: val.mib_data_sync,
+            OmciCli.VERSION_KEY: val.version,
+            OmciCli.ME_KEY: dict(),
+            OmciCli.MSG_TYPE_KEY: set()
+        }
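+        # Numeric class IDs share this dict with the string metadata keys;
+        # show_mib later filters them out with isinstance(k, int).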
+        for class_data in val.classes:
+            data[class_data.class_id] = self._class_to_dict(class_data)
+
+        for managed_entity in val.managed_entities:
+            data[OmciCli.ME_KEY][managed_entity.class_id] = managed_entity.name
+
+        for msg_type in val.message_types:
+            data[OmciCli.MSG_TYPE_KEY].add(msg_type.message_type)
+
+        return data
+
+    def _string_to_time(self, time):
+        return datetime.strptime(time, OmciCli.TIME_FORMAT) if len(time) else None
+
+    def help_show_me(self):
+        self.poutput('show_me [-d <device-id>]' +
+                     linesep + '-d: <device-id>   ONU Device ID')
+
+    @options([
+        make_option('-d', '--device-id', action="store", dest='device_id', type='string',
+                    help='ONU Device ID', default=None),
+    ])
+    def do_show_me(self, _line, opts):
+        """ Show supported OMCI Managed Entities"""
+
+        device_id = opts.device_id or self.device_id
+
+        try:
+            mib_db = self.get_device_mib(device_id, depth=1)
+            mib = self._device_to_dict(mib_db)
+
+        except Exception:   # gRPC error if Device ID not found in DB
+            self.poutput(self.colorize('Failed to get supported ME information for ONU {}'
+                                       .format(device_id), 'red'))
+            return
+
+        class_ids = sorted(mib[OmciCli.ME_KEY].keys())
+
+        self.poutput('Supported Managed Entities for ONU {}'.format(device_id))
+        for class_id in class_ids:
+            self.poutput('    {0} - ({0:#x}): {1}'.format(class_id,
+                                                          mib[OmciCli.ME_KEY][class_id]))
+
+    def help_show_msg_types(self):
+        self.poutput('show_msg_types [-d <device-id>]' +
+                     linesep + '-d: <device-id>   ONU Device ID')
+
+    @options([
+        make_option('-d', '--device-id', action="store", dest='device_id', type='string',
+                    help='ONU Device ID', default=None),
+    ])
+    def do_show_msg_types(self, _line, opts):
+        """ Show supported OMCI Message Types"""
+        device_id = opts.device_id or self.device_id
+
+        try:
+            mib_db = self.get_device_mib(device_id, depth=1)
+            mib = self._device_to_dict(mib_db)
+
+        except Exception:   # gRPC error if Device ID not found in DB
+            self.poutput(self.colorize('Failed to get supported Message Types for ONU {}'
+                                       .format(device_id), 'red'))
+            return
+
+        msg_types = sorted(mib[OmciCli.MSG_TYPE_KEY])
+
+        self.poutput('Supported Message Types for ONU {}'.format(device_id))
+        for msg_type in msg_types:
+            self.poutput('    {0} - ({0:#x}): {1}'.
+                         format(msg_type,
+                                OmciCli.MSG_TYPE_TO_NAME.get(msg_type, 'Unknown')))
+
+    def get_devices(self):
+        stub = self.get_stub()
+        res = stub.ListDevices(Empty())
+        return res.items
+
+    def do_devices(self, line):
+        """List devices registered in Voltha reduced for OMCI menu"""
+        devices = self.get_devices()
+        omit_fields = {
+            'adapter',
+            'model',
+            'hardware_version',
+            'images',
+            'firmware_version',
+            'serial_number',
+            'vlan',
+            'root',
+            'extra_args',
+            'proxy_address',
+        }
+        print_pb_list_as_table('Devices:', devices, omit_fields, self.poutput)
+
+    def help_devices(self):
+        self.poutput('TODO: Provide some help')
+
+    def poutput(self, msg):
+        """Convenient shortcut for self.stdout.write(); adds newline if necessary."""
+        if msg:
+            self.stdout.write(msg)
+            if msg[-1] != '\n':
+                self.stdout.write('\n')
diff --git a/python/cli/setup.sh b/python/cli/setup.sh
new file mode 100755
index 0000000..6cab0bf
--- /dev/null
+++ b/python/cli/setup.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+while getopts LGC:g:s: option
+do
+    case "${option}"
+    in
+	L) LOOKUP_OPT="-L";;
+	G) GLOBAL_REQUEST_OPT="-G";;
+	C) CONSUL_OPT="-C ${OPTARG}";;
+	g) GRPC_OPT="-g ${OPTARG}";;
+	s) SIM_OPT="-s ${OPTARG}";;
+    esac
+done
+
+if [ -z "$CONSUL_OPT" ]
+then
+    CONSUL_OPT="-C $DOCKER_HOST_IP:8500"
+fi
+
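+# Launch the CLI from .bash_profile so an SSH login as 'voltha' lands
+# directly in the CLI, and log the session out when the CLI exits.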
+echo "export DOCKER_HOST_IP=$DOCKER_HOST_IP" > /home/voltha/.bashrc
+echo "export PYTHONPATH=/voltha" >> /home/voltha/.bashrc
+echo "export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" >> /home/voltha/.bashrc
+echo "export DOCKER_HOST_IP=$DOCKER_HOST_IP" > /home/voltha/.bash_profile
+echo "export PYTHONPATH=/voltha" >> /home/voltha/.bash_profile
+echo "export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" >> /home/voltha/.bash_profile
+echo "/voltha/python/cli/main.py $LOOKUP_OPT $GLOBAL_REQUEST_OPT $CONSUL_OPT $GRPC_OPT $SIM_OPT" >> /home/voltha/.bash_profile
+echo "logout" >> /home/voltha/.bash_profile
+chown voltha.voltha /home/voltha/.bash_profile
+/usr/sbin/sshd -D
+
diff --git a/python/cli/table.py b/python/cli/table.py
new file mode 100644
index 0000000..7e6a4d8
--- /dev/null
+++ b/python/cli/table.py
@@ -0,0 +1,204 @@
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import sys
+
+from google.protobuf.message import Message
+from termcolor import colored
+
+_printfn = lambda l: sys.stdout.write(l + '\n')
+
+
+class TablePrinter(object):
+    """Simple tabular data printer utility. For usage, see bottom of file"""
+
+    def __init__(self):
+        self.max_field_lengths = {}
+        self.field_names = {}
+        self.cell_values = {}
+
+    def add_cell(self, row_number, field_key, field_name, value):
+        if not isinstance(value, str):
+            value = str(value)
+        self._add_field_type(field_key, field_name)
+        row = self.cell_values.setdefault(row_number, {})
+        row[field_key] = value
+        self._update_max_length(field_key, value)
+
+    def number_of_rows(self):
+        return len(self.cell_values)
+
+    def print_table(self, header=None, printfn=_printfn, dividers=10):
+
+        if header is not None:
+            printfn(header)
+
+        field_keys = sorted(self.field_names.keys())
+
+        if not field_keys:
+            printfn('table empty')
+            return
+
+        def p_sep():
+            printfn('+' + '+'.join(
+                [(self.max_field_lengths[k] + 2) * '-'
+                 for k in field_keys]) + '+')
+
+        p_sep()
+
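+        # '%%%ds' bakes the column width into a '%<N>s' format first, then
+        # applies it to the value to get right-aligned, fixed-width columns.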
+        printfn('| ' + ' | '.join(
+            '%%%ds' % self.max_field_lengths[k] % self.field_names[k]
+            for k in field_keys) + ' |')
+        p_sep()
+
+        for i in range(len(self.cell_values)):
+            row = self.cell_values[i]
+            printfn(colored('| ' + ' | '.join(
+                '%%%ds' % self.max_field_lengths[k] % row.get(k, '')
+                for k in field_keys
+            ) + ' |'))
+            if not ((i + 1) % dividers):
+                p_sep()
+
+        if (i + 1) % dividers:
+            p_sep()
+
+    def _update_max_length(self, field_key, string):
+        length = len(string)
+        if length > self.max_field_lengths.get(field_key, 0):
+            self.max_field_lengths[field_key] = length
+
+    def _add_field_type(self, field_key, field_name):
+        if field_key not in self.field_names:
+            self.field_names[field_key] = field_name
+            self._update_max_length(field_key, field_name)
+        else:
+            assert self.field_names[field_key] == field_name
+
+
+def print_pb_list_as_table(header, items, fields_to_omit=None,
+                           printfn=_printfn, dividers=10, show_nulls=False,
+                           presfns=None):
+    from utils import pb2dict
+
+    # Avoid mutable defaults and a None membership test further down
+    fields_to_omit = fields_to_omit or {}
+    presfns = presfns or {}
+
+    t = TablePrinter()
+    for row, obj in enumerate(items):
+        assert isinstance(obj, Message)
+
+        def set_row(pd_dict, _row, field, value, t, prefix,
+                    fields_to_omit, number):
+            fname = prefix + field.name
+            if fname in fields_to_omit:
+                return
+            if isinstance(value, Message):
+                add(_row, value, fname + '.',
+                    100 * (number + field.number))
+            else:
+                presentationfn = presfns[fname] if fname in presfns else lambda x: x
+                t.add_cell(_row, number + field.number, fname,
+                           presentationfn(pd_dict.get(field.name)))
+
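+        # Nested message fields are assigned column keys scaled by 100 so
+        # they sort right after their parent field in the printed order.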
+        def add(_row, pb, prefix='', number=0):
+            d = pb2dict(pb)
+            if show_nulls:
+                fields = pb.DESCRIPTOR.fields
+                for field in fields:
+                    set_row(d,
+                            _row,
+                            field,
+                            getattr(pb, field.name),
+                            t,
+                            prefix,
+                            fields_to_omit,
+                            number)
+            else:
+                fields = pb.ListFields()
+                for (field, value) in fields:
+                    set_row(d,
+                            _row,
+                            field,
+                            value,
+                            t,
+                            prefix,
+                            fields_to_omit,
+                            number)
+        add(row, obj)
+
+    t.print_table(header, printfn, dividers)
+
+
+def print_pb_as_table(header, pb, fields_to_omit=None, printfn=_printfn,
+                      show_nulls=False):
+
+    from utils import pb2dict
+
+    fields_to_omit = fields_to_omit or {}
+
+    def is_repeated_item(msg):
+        return hasattr(msg, "extend")
+
+    def set_cell(pb, field, value, t, prefix, fields_to_omit):
+        d = pb2dict(pb)
+        fname = prefix + field.name
+
+        if fname in fields_to_omit:
+            return
+        if isinstance(value, Message):
+            pr(value, fname + '.')
+        elif is_repeated_item(value): # handles any list
+            row = t.number_of_rows()
+            t.add_cell(row, 0, 'field', fname)
+            t.add_cell(row, 1, 'value',
+                       '{} item(s)'.format(len(d.get(field.name))))
+        else:
+            row = t.number_of_rows()
+            t.add_cell(row, 0, 'field', fname)
+            t.add_cell(row, 1, 'value', value)
+
+
+    t = TablePrinter()
+
+    def pr(_pb, prefix=''):
+        if show_nulls:
+            fields = _pb.DESCRIPTOR.fields
+            for field in sorted(fields, key=lambda f: f.number):
+                set_cell(_pb,
+                         field,
+                         getattr(_pb, field.name),
+                         t,
+                         prefix,
+                         fields_to_omit)
+        else:
+            fields = _pb.ListFields()
+            for (field, value) in sorted(fields, key=lambda (f, v): f.number):
+                set_cell(_pb,
+                         field,
+                         value,
+                         t,
+                         prefix,
+                         fields_to_omit)
+
+    pr(pb)
+
+    t.print_table(header, printfn)
+
+
+if __name__ == '__main__':
+    import random
+
+    t = TablePrinter()
+    for row in range(10):
+        t.add_cell(row, 0, 'id', row + 100)
+        t.add_cell(row, 1, 'name', 'Joe Somebody')
+        t.add_cell(row, 2, 'ows', '${}'.format(random.randint(10, 100000)))
+    t.print_table()
diff --git a/python/cli/utils.py b/python/cli/utils.py
new file mode 100644
index 0000000..38e5ee2
--- /dev/null
+++ b/python/cli/utils.py
@@ -0,0 +1,186 @@
+#
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import sys
+
+from google.protobuf.json_format import MessageToDict
+from termcolor import cprint, colored
+
+from table import TablePrinter
+
+
+_printfn = lambda l: sys.stdout.write(l + '\n')
+
+
+def pb2dict(pb_msg):
+    d = MessageToDict(pb_msg, including_default_value_fields=1,
+                      preserving_proto_field_name=1)
+    return d
+
+
+def p_cookie(cookie):
+    cookie = '%x' % int(cookie)
+    if len(cookie) > 8:
+        return '~' + cookie[len(cookie)-8:]
+    else:
+        return cookie
+
+'''
+    OFPP_NORMAL     = 0x7ffffffa;  /* Forward using non-OpenFlow pipeline. */
+    OFPP_FLOOD      = 0x7ffffffb;  /* Flood using non-OpenFlow pipeline. */
+    OFPP_ALL        = 0x7ffffffc;  /* All standard ports except input port. */
+    OFPP_CONTROLLER = 0x7ffffffd;  /* Send to controller. */
+    OFPP_LOCAL      = 0x7ffffffe;  /* Local openflow "port". */
+    OFPP_ANY        = 0x7fffffff;  /* Special value used in some requests when
+                                      no port is specified (i.e. wildcarded). */
+'''
+
+
+def p_port(port):
+    if port & 0x7fffffff == 0x7ffffffa:
+        return 'NORMAL'
+    elif port & 0x7fffffff == 0x7ffffffb:
+        return 'FLOOD'
+    elif port & 0x7fffffff == 0x7ffffffc:
+        return 'ALL'
+    elif port & 0x7fffffff == 0x7ffffffd:
+        return 'CONTROLLER'
+    elif port & 0x7fffffff == 0x7ffffffe:
+        return 'LOCAL'
+    elif port & 0x7fffffff == 0x7fffffff:
+        return 'ANY'
+    else:
+        return str(port)
+
+
+def p_vlan_vid(vlan_vid):
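+    # Bit 0x1000 is OFPVID_PRESENT: when set, the lower 12 bits carry the
+    # actual VLAN ID.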
+    if vlan_vid == 0:
+        return 'untagged'
+    assert vlan_vid & 4096 == 4096
+    return str(vlan_vid - 4096)
+
+
+def p_ipv4(x):
+    return '.'.join(str(v) for v in [
+        (x >> 24) & 0xff, (x >> 16) & 0xff, (x >> 8) & 0xff, x & 0xff
+    ])
+
+
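+# Each printer returns (column-weight, column-name, rendered-value); the
+# weight fixes the column ordering in the flow table output.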
+field_printers = {
+    'IN_PORT': lambda f: (100, 'in_port', p_port(f['port'])),
+    'VLAN_VID': lambda f: (101, 'vlan_vid', p_vlan_vid(f['vlan_vid'])),
+    'VLAN_PCP': lambda f: (102, 'vlan_pcp', str(f['vlan_pcp'])),
+    'ETH_TYPE': lambda f: (103, 'eth_type', '%X' % f['eth_type']),
+    'IP_PROTO': lambda f: (104, 'ip_proto', str(f['ip_proto'])),
+    'IPV4_DST': lambda f: (105, 'ipv4_dst', p_ipv4(f['ipv4_dst'])),
+    'UDP_SRC': lambda f: (106, 'udp_src', str(f['udp_src'])),
+    'UDP_DST': lambda f: (107, 'udp_dst', str(f['udp_dst'])),
+    'TCP_SRC': lambda f: (108, 'tcp_src', str(f['tcp_src'])),
+    'TCP_DST': lambda f: (109, 'tcp_dst', str(f['tcp_dst'])),
+    'METADATA': lambda f: (110, 'metadata', str(f['table_metadata'])),
+}
+
+
+def p_field(field):
+    assert field['oxm_class'].endswith('OPENFLOW_BASIC')
+    ofb = field['ofb_field']
+    assert not ofb['has_mask']
+    type = ofb['type'][len('OFPXMT_OFB_'):]
+    weight, field_name, value = field_printers[type](ofb)
+    return 1000 + weight, 'set_' + field_name, value
+
+
+action_printers = {
+    'SET_FIELD': lambda a: p_field(a['set_field']['field']),
+    'POP_VLAN': lambda a: (2000, 'pop_vlan', 'Yes'),
+    'PUSH_VLAN': lambda a: (2001, 'push_vlan', '%x' % a['push']['ethertype']),
+    'GROUP': lambda a: (3000, 'group', p_port(a['group']['group_id'])),
+    'OUTPUT': lambda a: (4000, 'output', p_port(a['output']['port'])),
+}
+
+
+def print_flows(what, id, type, flows, groups, printfn=_printfn):
+
+    header = ''.join([
+        '{} '.format(what),
+        colored(id, color='green', attrs=['bold']),
+        ' (type: ',
+        colored(type, color='blue'),
+        ')'
+    ]) + '\nFlows ({}):'.format(len(flows))
+
+    table = TablePrinter()
+    for i, flow in enumerate(flows):
+
+        table.add_cell(i, 0, 'table_id', value=str(flow['table_id']))
+        table.add_cell(i, 1, 'priority', value=str(flow['priority']))
+        table.add_cell(i, 2, 'cookie', p_cookie(flow['cookie']))
+
+        assert flow['match']['type'] == 'OFPMT_OXM'
+        for field in flow['match']['oxm_fields']:
+            assert field['oxm_class'].endswith('OPENFLOW_BASIC')
+            ofb = field['ofb_field']
+            # see CORD-816 (https://jira.opencord.org/browse/CORD-816)
+            assert not ofb['has_mask'], 'masked match not handled yet'
+            type = ofb['type'][len('OFPXMT_OFB_'):]
+            table.add_cell(i, *field_printers[type](ofb))
+
+        for instruction in flow['instructions']:
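+            # OpenFlow 1.3 instruction types: 1 = OFPIT_GOTO_TABLE,
+            # 4 = OFPIT_APPLY_ACTIONS, 5 = OFPIT_CLEAR_ACTIONS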
+            itype = instruction['type']
+            if itype == 4:
+                for action in instruction['actions']['actions']:
+                    atype = action['type'][len('OFPAT_'):]
+                    table.add_cell(i, *action_printers[atype](action))
+            elif itype == 1:
+                table.add_cell(i, 10000, 'goto-table',
+                               instruction['goto_table']['table_id'])
+            elif itype == 5:
+                table.add_cell(i, 10000, 'clear-actions', [])
+            else:
+                raise NotImplementedError(
+                    'not handling instruction type {}'.format(itype))
+
+    table.print_table(header, printfn)
+
+
+def print_groups(what, id, type, groups, printfn=_printfn):
+    header = ''.join([
+        '{} '.format(what),
+        colored(id, color='green', attrs=['bold']),
+        ' (type: ',
+        colored(type, color='blue'),
+        ')'
+    ]) + '\nGroups ({}):'.format(len(groups))
+
+    table = TablePrinter()
+    for i, group in enumerate(groups):
+        output_ports = []
+        for bucket in group['desc']['buckets']:
+            for action in bucket['actions']:
+                if action['type'] == 'OFPAT_OUTPUT':
+                    output_ports.append(action['output']['port'])
+        table.add_cell(i, 0, 'group_id', value=str(group['desc']['group_id']))
+        table.add_cell(i, 1, 'buckets', value=str(dict(output=output_ports)))
+
+    table.print_table(header, printfn)
+
+def dict2line(d):
+    assert isinstance(d, dict)
+    return ', '.join('{}: {}'.format(k, v) for k, v in sorted(d.items()))
+
+def enum2name(msg_obj, enum_type, enum_value):
+    descriptor = msg_obj.DESCRIPTOR.enum_types_by_name[enum_type]
+    name = descriptor.values_by_number[enum_value].name
+    return name
diff --git a/python/common/frameio/frameio.py b/python/common/frameio/frameio.py
deleted file mode 100644
index 3f5bcf6..0000000
--- a/python/common/frameio/frameio.py
+++ /dev/null
@@ -1,437 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-A module that can send and receive raw ethernet frames on a set of interfaces
-and it can manage a set of vlan interfaces on top of existing
-interfaces. Due to reliance on raw sockets, this module requires
-root access. Also, raw sockets are hard to deal with in Twisted (not
-directly supported) we need to run the receiver select loop on a dedicated
-thread.
-"""
-
-import os
-import socket
-import struct
-import uuid
-from pcapy import BPFProgram
-from threading import Thread, Condition
-
-import fcntl
-
-import select
-import structlog
-import sys
-
-from scapy.data import ETH_P_ALL
-from twisted.internet import reactor
-from zope.interface import implementer
-
-from voltha.registry import IComponent
-
-if sys.platform.startswith('linux'):
-    from common.frameio.third_party.oftest import afpacket, netutils
-elif sys.platform == 'darwin':
-    from scapy.arch import pcapdnet, BIOCIMMEDIATE, dnet
-
-log = structlog.get_logger()
-
-
-def hexify(buffer):
-    """
-    Return a hexadecimal string encoding of input buffer
-    """
-    return ''.join('%02x' % ord(c) for c in buffer)
-
-
-class _SelectWakerDescriptor(object):
-    """
-    A descriptor that can be mixed into a select loop to wake it up.
-    """
-    def __init__(self):
-        self.pipe_read, self.pipe_write = os.pipe()
-        fcntl.fcntl(self.pipe_write, fcntl.F_SETFL, os.O_NONBLOCK)
-
-    def __del__(self):
-        os.close(self.pipe_read)
-        os.close(self.pipe_write)
-
-    def fileno(self):
-        return self.pipe_read
-
-    def wait(self):
-        os.read(self.pipe_read, 1)
-
-    def notify(self):
-        """Trigger a select loop"""
-        os.write(self.pipe_write, '\x00')
-
-
-class BpfProgramFilter(object):
-    """
-    Convenience packet filter based on the well-tried Berkeley Packet Filter,
-    used by many well known open source tools such as pcap and tcpdump.
-    """
-    def __init__(self, program_string):
-        """
-        Create a filter using the BPF command syntax. To learn more,
-        consult 'man pcap-filter'.
-        :param program_string: The textual definition of the filter. Examples:
-        'vlan 1000'
-        'vlan 1000 and ip src host 10.10.10.10'
-        """
-        self.bpf = BPFProgram(program_string)
-
-    def __call__(self, frame):
-        """
-        Return 1 if frame passes filter.
-        :param frame: Raw frame provided as Python string
-        :return: 1 if frame satisfies filter, 0 otherwise.
-        """
-        return self.bpf.filter(frame)
-
-
-class FrameIOPort(object):
-    """
-    Represents a network interface which we can send/receive raw
-    Ethernet frames.
-    """
-
-    RCV_SIZE_DEFAULT = 4096
-    ETH_P_ALL = 0x03
-    RCV_TIMEOUT = 10000
-    MIN_PKT_SIZE = 60
-
-    def __init__(self, iface_name):
-        self.iface_name = iface_name
-        self.proxies = []
-        self.socket = self.open_socket(self.iface_name)
-        log.debug('socket-opened', fn=self.fileno(), iface=iface_name)
-        self.received = 0
-        self.discarded = 0
-
-    def add_proxy(self, proxy):
-        self.proxies.append(proxy)
-
-    def del_proxy(self, proxy):
-        self.proxies = [p for p in self.proxies if p.name != proxy.name]
-
-    def open_socket(self, iface_name):
-        raise NotImplementedError('to be implemented by derived class')
-
-    def rcv_frame(self):
-        raise NotImplementedError('to be implemented by derived class')
-
-    def __del__(self):
-        if self.socket:
-            self.socket.close()
-            self.socket = None
-        log.debug('socket-closed', iface=self.iface_name)
-
-    def fileno(self):
-        return self.socket.fileno()
-
-    def _dispatch(self, proxy, frame):
-        log.debug('calling-publisher', proxy=proxy.name, frame=hexify(frame))
-        try:
-            proxy.callback(proxy, frame)
-        except Exception as e:
-            log.exception('callback-error',
-                          explanation='Callback failed while processing frame',
-                          e=e)
-
-    def recv(self):
-        """Called on the select thread when a packet arrives"""
-        try:
-            frame = self.rcv_frame()
-        except RuntimeError as e:
-            # we observed this happens sometimes right after the socket was
-            # attached to a newly created veth interface. So we log it, but
-            # allow to continue.
-            log.warn('afpacket-recv-error', code=-1)
-            return
-
-        log.debug('frame-received', iface=self.iface_name, len=len(frame),
-                  hex=hexify(frame))
-        self.received +=1
-        dispatched = False
-        for proxy in self.proxies:
-            if proxy.filter is None or proxy.filter(frame):
-                log.debug('frame-dispatched')
-                dispatched = True
-                reactor.callFromThread(self._dispatch, proxy, frame)
-
-        if not dispatched:
-            self.discarded += 1
-            log.debug('frame-discarded')
-
-    def send(self, frame):
-        log.debug('sending', len=len(frame), iface=self.iface_name)
-        sent_bytes = self.send_frame(frame)
-        if sent_bytes != len(frame):
-            log.error('send-error', iface=self.iface_name,
-                      wanted_to_send=len(frame), actually_sent=sent_bytes)
-        return sent_bytes
-
-    def send_frame(self, frame):
-        try:
-            return self.socket.send(frame)
-        except socket.error, err:
-            if err[0] == os.errno.EINVAL:
-                if len(frame) < self.MIN_PKT_SIZE:
-                    padding = '\x00' * (self.MIN_PKT_SIZE - len(frame))
-                    frame = frame + padding
-                    return self.socket.send(frame)
-            else:
-                raise
-
-    def up(self):
-        if sys.platform.startswith('darwin'):
-            pass
-        else:
-            os.system('ip link set {} up'.format(self.iface_name))
-        return self
-
-    def down(self):
-        if sys.platform.startswith('darwin'):
-            pass
-        else:
-            os.system('ip link set {} down'.format(self.iface_name))
-        return self
-
-    def statistics(self):
-        return self.received, self.discarded
-
-
-class LinuxFrameIOPort(FrameIOPort):
-
-    def open_socket(self, iface_name):
-        s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, 0)
-        afpacket.enable_auxdata(s)
-        s.bind((self.iface_name, self.ETH_P_ALL))
-        netutils.set_promisc(s, iface_name)
-        s.settimeout(self.RCV_TIMEOUT)
-        return s
-
-    def rcv_frame(self):
-        return afpacket.recv(self.socket, self.RCV_SIZE_DEFAULT)
-
-
-class DarwinFrameIOPort(FrameIOPort):
-
-    def open_socket(self, iface_name):
-        sin = pcapdnet.open_pcap(iface_name, 1600, 1, 100)
-        try:
-            fcntl.ioctl(sin.fileno(), BIOCIMMEDIATE, struct.pack("I",1))
-        except:
-            pass
-
-        # need a different kind of socket for sending out
-        self.sout = dnet.eth(iface_name)
-
-        return sin
-
-    def send_frame(self, frame):
-        return self.sout.send(frame)
-
-    def rcv_frame(self):
-        pkt = self.socket.next()
-        if pkt is not None:
-            ts, pkt = pkt
-        return pkt
-
-
-if sys.platform == 'darwin':
-    _FrameIOPort = DarwinFrameIOPort
-elif sys.platform.startswith('linux'):
-    _FrameIOPort = LinuxFrameIOPort
-else:
-    raise Exception('Unsupported platform {}'.format(sys.platform))
-    sys.exit(1)
-
-
-class FrameIOPortProxy(object):
-    """Makes FrameIOPort sharable between multiple users"""
-
-    def __init__(self, frame_io_port, callback, filter=None, name=None):
-        self.frame_io_port = frame_io_port
-        self.callback = callback
-        self.filter = filter
-        self.name = uuid.uuid4().hex[:12] if name is None else name
-
-    @property
-    def iface_name(self):
-        return self.frame_io_port.iface_name
-
-    def get_iface_name(self):
-        return self.frame_io_port.iface_name
-
-    def send(self, frame):
-        return self.frame_io_port.send(frame)
-
-    def up(self):
-        self.frame_io_port.up()
-        return self
-
-    def down(self):
-        self.frame_io_port.down()
-        return self
-
-
-@implementer(IComponent)
-class FrameIOManager(Thread):
-    """
-    Packet/Frame IO manager that can be used to send/receive raw frames
-    on a set of network interfaces.
-    """
-    def __init__(self):
-        super(FrameIOManager, self).__init__()
-
-        self.ports = {}  # iface_name -> ActiveFrameReceiver
-        self.queue = {}  # iface_name -> TODO
-
-        self.cvar = Condition()
-        self.waker = _SelectWakerDescriptor()
-        self.stopped = False
-        self.ports_changed = False
-
-    # ~~~~~~~~~~~ exposed methods callable from main thread ~~~~~~~~~~~~~~~~~~~
-
-    def start(self):
-        """
-        Start the IO manager and its select loop thread
-        """
-        log.debug('starting')
-        super(FrameIOManager, self).start()
-        log.info('started')
-        return self
-
-    def stop(self):
-        """
-        Stop the IO manager and its thread with the select loop
-        """
-        log.debug('stopping')
-        self.stopped = True
-        self.waker.notify()
-        self.join()
-        del self.ports
-        log.info('stopped')
-
-    def list_interfaces(self):
-        """
-        Return list of interfaces listened on
-        :return: List of FrameIOPort objects
-        """
-        return self.ports
-
-    def open_port(self, iface_name, callback, filter=None, name=None):
-        """
-        Add a new interface and start receiving on it.
-        :param iface_name: Name of the interface. Must be an existing Unix
-        interface (eth0, en0, etc.)
-        :param callback: Called on each received frame;
-        signature: def callback(port, frame) where port is the FrameIOPort
-        instance at which the frame was received, frame is the actual frame
-        received (as binay string)
-        :param filter: An optional filter (predicate), with signature:
-        def filter(frame). If provided, only frames for which filter evaluates
-        to True will be forwarded to callback.
-        :return: FrmaeIOPortProxy instance.
-        """
-
-        port = self.ports.get(iface_name)
-        if port is None:
-            port = _FrameIOPort(iface_name)
-            self.ports[iface_name] = port
-            self.ports_changed = True
-            self.waker.notify()
-
-        proxy = FrameIOPortProxy(port, callback, filter, name)
-        port.add_proxy(proxy)
-
-        return proxy
-
-    def close_port(self, proxy):
-        """
-        Remove the proxy. If this is the last proxy on an interface, stop and
-        remove the named interface as well
-        :param proxy: FrameIOPortProxy reference
-        :return: None
-        """
-        assert isinstance(proxy, FrameIOPortProxy)
-        iface_name = proxy.get_iface_name()
-        assert iface_name in self.ports, "iface_name {} unknown".format(iface_name)
-        port = self.ports[iface_name]
-        port.del_proxy(proxy)
-
-        if not port.proxies:
-            del self.ports[iface_name]
-            # need to exit select loop to reconstruct select fd lists
-            self.ports_changed = True
-            self.waker.notify()
-
-    def send(self, iface_name, frame):
-        """
-        Send frame on given interface
-        :param iface_name: Name of previously registered interface
-        :param frame: frame as string
-        :return: number of bytes sent
-        """
-        return self.ports[iface_name].send(frame)
-
-    # ~~~~~~~~~~~~~ Thread methods (running on non-main thread ~~~~~~~~~~~~~~~~
-
-    def run(self):
-        """
-        Called on the alien thread, this is the core multi-port receive loop
-        """
-
-        log.debug('select-loop-started')
-
-        # outer loop constructs sockets list for select
-        while not self.stopped:
-            sockets = [self.waker] + self.ports.values()
-            self.ports_changed = False
-            empty = []
-            # inner select loop
-
-            while not self.stopped:
-                try:
-                    _in, _out, _err = select.select(sockets, empty, empty, 1)
-                except Exception as e:
-                    log.exception('frame-io-select-error', e=e)
-                    break
-                with self.cvar:
-                    for port in _in:
-                        if port is self.waker:
-                            self.waker.wait()
-                            continue
-                        else:
-                            port.recv()
-                    self.cvar.notify_all()
-                if self.ports_changed:
-                    break  # break inner loop so we reconstruct sockets list
-
-        log.debug('select-loop-exited')
-
-    def del_interface(self, iface_name):
-        """
-            Delete interface for stopping
-        """
-
-        log.info('Delete interface')
-        del self.ports[iface_name]
-        log.info('Interface(port) is deleted')
diff --git a/python/common/frameio/third_party/__init__.py b/python/common/frameio/third_party/__init__.py
deleted file mode 100644
index b0fb0b2..0000000
--- a/python/common/frameio/third_party/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/python/common/frameio/third_party/oftest/LICENSE b/python/common/frameio/third_party/oftest/LICENSE
deleted file mode 100644
index 3216042..0000000
--- a/python/common/frameio/third_party/oftest/LICENSE
+++ /dev/null
@@ -1,36 +0,0 @@
-OpenFlow Test Framework
-
-Copyright (c) 2010 The Board of Trustees of The Leland Stanford
-Junior University
-
-Except where otherwise noted, this software is distributed under
-the OpenFlow Software License.  See
-http://www.openflowswitch.org/wp/legal/ for current details.
-
-We are making the OpenFlow specification and associated documentation
-(Software) available for public use and benefit with the expectation
-that others will use, modify and enhance the Software and contribute
-those enhancements back to the community. However, since we would like
-to make the Software available for broadest use, with as few
-restrictions as possible permission is hereby granted, free of charge,
-to any person obtaining a copy of this Software to deal in the
-Software under the copyrights without restriction, including without
-limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-The name and trademarks of copyright holder(s) may NOT be used in
-advertising or publicity pertaining to the Software or any derivatives
-without specific, written prior permission.
diff --git a/python/common/frameio/third_party/oftest/README.md b/python/common/frameio/third_party/oftest/README.md
deleted file mode 100644
index f0cb649..0000000
--- a/python/common/frameio/third_party/oftest/README.md
+++ /dev/null
@@ -1,6 +0,0 @@
-Files in this directory are derived from the respective files
-in oftest (http://github.com/floodlight/oftest).
- 
-For the licensing terms of these files, see LICENSE in this dir.
- 
-
diff --git a/python/common/frameio/third_party/oftest/__init__.py b/python/common/frameio/third_party/oftest/__init__.py
deleted file mode 100644
index b0fb0b2..0000000
--- a/python/common/frameio/third_party/oftest/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/python/common/frameio/third_party/oftest/afpacket.py b/python/common/frameio/third_party/oftest/afpacket.py
deleted file mode 100644
index 9ae8075..0000000
--- a/python/common/frameio/third_party/oftest/afpacket.py
+++ /dev/null
@@ -1,124 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-AF_PACKET receive support
-
-When VLAN offload is enabled on the NIC Linux will not deliver the VLAN tag
-in the data returned by recv. Instead, it delivers the VLAN TCI in a control
-message. Python 2.x doesn't have built-in support for recvmsg, so we have to
-use ctypes to call it. The recv function exported by this module reconstructs
-the VLAN tag if it was offloaded.
-"""
-
-import struct
-from ctypes import *
-
-ETH_P_8021Q = 0x8100
-SOL_PACKET = 263
-PACKET_AUXDATA = 8
-TP_STATUS_VLAN_VALID = 1 << 4
-
-class struct_iovec(Structure):
-    _fields_ = [
-        ("iov_base", c_void_p),
-        ("iov_len", c_size_t),
-    ]
-
-class struct_msghdr(Structure):
-    _fields_ = [
-        ("msg_name", c_void_p),
-        ("msg_namelen", c_uint32),
-        ("msg_iov", POINTER(struct_iovec)),
-        ("msg_iovlen", c_size_t),
-        ("msg_control", c_void_p),
-        ("msg_controllen", c_size_t),
-        ("msg_flags", c_int),
-    ]
-
-class struct_cmsghdr(Structure):
-    _fields_ = [
-        ("cmsg_len", c_size_t),
-        ("cmsg_level", c_int),
-        ("cmsg_type", c_int),
-    ]
-
-class struct_tpacket_auxdata(Structure):
-    _fields_ = [
-        ("tp_status", c_uint),
-        ("tp_len", c_uint),
-        ("tp_snaplen", c_uint),
-        ("tp_mac", c_ushort),
-        ("tp_net", c_ushort),
-        ("tp_vlan_tci", c_ushort),
-        ("tp_padding", c_ushort),
-    ]
-
-libc = CDLL("libc.so.6")
-recvmsg = libc.recvmsg
-recvmsg.argtypes = [c_int, POINTER(struct_msghdr), c_int]
-recvmsg.retype = c_int
-
-def enable_auxdata(sk):
-    """
-    Ask the kernel to return the VLAN tag in a control message
-
-    Must be called on the socket before afpacket.recv.
-    """
-    sk.setsockopt(SOL_PACKET, PACKET_AUXDATA, 1)
-
-def recv(sk, bufsize):
-    """
-    Receive a packet from an AF_PACKET socket
-    @sk Socket
-    @bufsize Maximum packet size
-    """
-    buf = create_string_buffer(bufsize)
-
-    ctrl_bufsize = sizeof(struct_cmsghdr) + sizeof(struct_tpacket_auxdata) + sizeof(c_size_t)
-    ctrl_buf = create_string_buffer(ctrl_bufsize)
-
-    iov = struct_iovec()
-    iov.iov_base = cast(buf, c_void_p)
-    iov.iov_len = bufsize
-
-    msghdr = struct_msghdr()
-    msghdr.msg_name = None
-    msghdr.msg_namelen = 0
-    msghdr.msg_iov = pointer(iov)
-    msghdr.msg_iovlen = 1
-    msghdr.msg_control = cast(ctrl_buf, c_void_p)
-    msghdr.msg_controllen = ctrl_bufsize
-    msghdr.msg_flags = 0
-
-    rv = recvmsg(sk.fileno(), byref(msghdr), 0)
-    if rv < 0:
-        raise RuntimeError("recvmsg failed: rv=%d", rv)
-
-    # The kernel only delivers control messages we ask for. We
-    # only enabled PACKET_AUXDATA, so we can assume it's the
-    # only control message.
-    assert msghdr.msg_controllen >= sizeof(struct_cmsghdr)
-
-    cmsghdr = struct_cmsghdr.from_buffer(ctrl_buf) # pylint: disable=E1101
-    assert cmsghdr.cmsg_level == SOL_PACKET
-    assert cmsghdr.cmsg_type == PACKET_AUXDATA
-
-    auxdata = struct_tpacket_auxdata.from_buffer(ctrl_buf, sizeof(struct_cmsghdr)) # pylint: disable=E1101
-
-    if auxdata.tp_vlan_tci != 0 or auxdata.tp_status & TP_STATUS_VLAN_VALID:
-        # Insert VLAN tag
-        tag = struct.pack("!HH", ETH_P_8021Q, auxdata.tp_vlan_tci)
-        return buf.raw[:12] + tag + buf.raw[12:rv]
-    else:
-        return buf.raw[:rv]
diff --git a/python/common/frameio/third_party/oftest/netutils.py b/python/common/frameio/third_party/oftest/netutils.py
deleted file mode 100644
index 092d490..0000000
--- a/python/common/frameio/third_party/oftest/netutils.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Network utilities for the OpenFlow test framework
-"""
-
-###########################################################################
-##                                                                         ##
-## Promiscuous mode enable/disable                                         ##
-##                                                                         ##
-## Based on code from Scapy by Phillippe Biondi                            ##
-##                                                                         ##
-##                                                                         ##
-## This program is free software; you can redistribute it and/or modify it ##
-## under the terms of the GNU General Public License as published by the   ##
-## Free Software Foundation; either version 2, or (at your option) any     ##
-## later version.                                                          ##
-##                                                                         ##
-## This program is distributed in the hope that it will be useful, but     ##
-## WITHOUT ANY WARRANTY; without even the implied warranty of              ##
-## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU       ##
-## General Public License for more details.                                ##
-##                                                                         ##
-#############################################################################
-
-import socket
-from fcntl import ioctl
-import struct
-
-# From net/if_arp.h
-ARPHDR_ETHER = 1
-ARPHDR_LOOPBACK = 772
-
-# From bits/ioctls.h
-SIOCGIFHWADDR  = 0x8927          # Get hardware address
-SIOCGIFINDEX   = 0x8933          # name -> if_index mapping
-
-# From netpacket/packet.h
-PACKET_ADD_MEMBERSHIP  = 1
-PACKET_DROP_MEMBERSHIP = 2
-PACKET_MR_PROMISC      = 1
-
-# From bits/socket.h
-SOL_PACKET = 263
-
-def get_if(iff,cmd):
-  s=socket.socket()
-  ifreq = ioctl(s, cmd, struct.pack("16s16x",iff))
-  s.close()
-  return ifreq
-
-def get_if_index(iff):
-  return int(struct.unpack("I",get_if(iff, SIOCGIFINDEX)[16:20])[0])
-
-def set_promisc(s,iff,val=1):
-  mreq = struct.pack("IHH8s", get_if_index(iff), PACKET_MR_PROMISC, 0, "")
-  if val:
-      cmd = PACKET_ADD_MEMBERSHIP
-  else:
-      cmd = PACKET_DROP_MEMBERSHIP
-  s.setsockopt(SOL_PACKET, cmd, mreq)
-
diff --git a/python/adapters/common/openflow/__init__.py b/python/common/openflow/__init__.py
similarity index 100%
rename from python/adapters/common/openflow/__init__.py
rename to python/common/openflow/__init__.py
diff --git a/python/adapters/common/openflow/utils.py b/python/common/openflow/utils.py
similarity index 99%
rename from python/adapters/common/openflow/utils.py
rename to python/common/openflow/utils.py
index 730c714..456ae06 100644
--- a/python/adapters/common/openflow/utils.py
+++ b/python/common/openflow/utils.py
@@ -15,7 +15,7 @@
 #
 import structlog
 
-from adapters.protos import openflow_13_pb2 as ofp
+from python.protos import openflow_13_pb2 as ofp
 from hashlib import md5
 
 log = structlog.get_logger()
diff --git a/python/common/utils/consulhelpers.py b/python/common/utils/consulhelpers.py
index df4dd58..853143b 100644
--- a/python/common/utils/consulhelpers.py
+++ b/python/common/utils/consulhelpers.py
@@ -21,7 +21,7 @@
 from structlog import get_logger
 from consul import Consul
 from random import randint
-from common.utils.nethelpers import get_my_primary_local_ipv4
+from nethelpers import get_my_primary_local_ipv4
 
 log = get_logger()
 
diff --git a/python/adapters/common/utils/registry.py b/python/common/utils/registry.py
similarity index 100%
rename from python/adapters/common/utils/registry.py
rename to python/common/utils/registry.py
diff --git a/python/docker/Dockerfile.adapter_ponsim_olt b/python/docker/Dockerfile.adapter_ponsim_olt
new file mode 100644
index 0000000..0c869de
--- /dev/null
+++ b/python/docker/Dockerfile.adapter_ponsim_olt
@@ -0,0 +1,42 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ARG TAG=latest
+ARG REGISTRY=
+ARG REPOSITORY=
+
+FROM ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} as protos
+FROM ${REGISTRY}${REPOSITORY}voltha-base:${TAG}
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Bundle app source
+RUN mkdir /voltha && touch /voltha/__init__.py
+ENV PYTHONPATH=/voltha
+COPY common /voltha/python/common/
+COPY adapters/common /voltha/python/adapters/common/
+COPY adapters/kafka /voltha/python/adapters/kafka
+COPY adapters/*.py /voltha/python/adapters/
+COPY adapters/ponsim_olt /voltha/python/adapters/ponsim_olt
+RUN touch /voltha/python/__init__.py
+RUN touch /voltha/python/adapters/__init__.py
+
+# Copy in the generated GRPC proto code
+COPY --from=protos /protos/voltha /voltha/python/protos
+COPY --from=protos /protos/google/api /voltha/python/protos/third_party/google/api
+COPY protos/third_party/__init__.py /voltha/python/protos/third_party
+RUN touch /voltha/python/protos/__init__.py
+RUN touch /voltha/python/protos/third_party/google/__init__.py
+
+# Exposing process and default entry point
+# CMD ["python", "/voltha/python/adapters/ponsim_olt/main.py"]
diff --git a/python/docker/Dockerfile.adapter_ponsim_onu b/python/docker/Dockerfile.adapter_ponsim_onu
new file mode 100644
index 0000000..c8d19f8
--- /dev/null
+++ b/python/docker/Dockerfile.adapter_ponsim_onu
@@ -0,0 +1,42 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ARG TAG=latest
+ARG REGISTRY=
+ARG REPOSITORY=
+
+FROM ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} as protos
+FROM ${REGISTRY}${REPOSITORY}voltha-base:${TAG}
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Bundle app source
+RUN mkdir /voltha && touch /voltha/__init__.py
+ENV PYTHONPATH=/voltha
+COPY common /voltha/python/common/
+COPY adapters/common /voltha/python/adapters/common/
+COPY adapters/kafka /voltha/python/adapters/kafka
+COPY adapters/*.py /voltha/python/adapters/
+COPY adapters/ponsim_onu /voltha/python/adapters/ponsim_onu
+RUN touch /voltha/python/__init__.py
+RUN touch /voltha/python/adapters/__init__.py
+
+# Copy in the generated GRPC proto code
+COPY --from=protos /protos/voltha /voltha/python/protos
+COPY --from=protos /protos/google/api /voltha/python/protos/third_party/google/api
+COPY protos/third_party/__init__.py /voltha/python/protos/third_party
+RUN touch /voltha/python/protos/__init__.py
+RUN touch /voltha/python/protos/third_party/google/__init__.py
+
+# Exposing process and default entry point
+# CMD ["python", "/voltha/python/adapters/ponsim_onu/main.py"]
diff --git a/python/docker/Dockerfile.cli b/python/docker/Dockerfile.cli
new file mode 100644
index 0000000..2c4fd45
--- /dev/null
+++ b/python/docker/Dockerfile.cli
@@ -0,0 +1,59 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ARG TAG=latest
+ARG REGISTRY=
+ARG REPOSITORY=
+
+FROM ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} as protos
+FROM ${REGISTRY}${REPOSITORY}voltha-base:${TAG}
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Bundle app source
+RUN mkdir /voltha && touch /voltha/__init__.py
+ENV PYTHONPATH=/voltha
+COPY common /voltha/python/common/
+COPY cli /voltha/python/cli
+RUN touch /voltha/python/__init__.py
+RUN touch /voltha/python/cli/__init__.py
+
+# Copy in the generated GRPC proto code
+COPY --from=protos /protos/voltha /voltha/python/protos
+COPY --from=protos /protos/google/api /voltha/python/protos/third_party/google/api
+COPY protos/third_party/__init__.py /voltha/python/protos/third_party
+RUN touch /voltha/python/protos/__init__.py
+RUN touch /voltha/python/protos/third_party/google/__init__.py
+
+# Setup the voltha user
+RUN useradd -b /home -d /home/voltha voltha -s /bin/bash
+RUN mkdir /home/voltha
+RUN chown voltha.voltha /home/voltha
+RUN echo "voltha:admin" | chpasswd
+RUN apt-get update && apt-get install -y openssh-server
+RUN mkdir /var/run/sshd
+RUN echo 'root:screencast' | chpasswd
+RUN sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config
+
+# SSH login fix. Otherwise user is kicked off after login
+RUN sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd
+
+ENV NOTVISIBLE "in users profile"
+RUN echo "export VISIBLE=now" >> /etc/profile
+
+EXPOSE 22
+
+# Exposing process and default entry point
+ENTRYPOINT ["/usr/bin/dumb-init", "--"]
+
+CMD ["/voltha/python/cli/setup.sh"]
diff --git a/python/adapters/docker/Dockerfile.protoc b/python/docker/Dockerfile.protoc
similarity index 100%
rename from python/adapters/docker/Dockerfile.protoc
rename to python/docker/Dockerfile.protoc
diff --git a/python/adapters/docker/Dockerfile.protos b/python/docker/Dockerfile.protos
similarity index 100%
rename from python/adapters/docker/Dockerfile.protos
rename to python/docker/Dockerfile.protos
diff --git a/python/adapters/docker/config/Makefile.protos b/python/docker/config/Makefile.protos
similarity index 100%
rename from python/adapters/docker/config/Makefile.protos
rename to python/docker/config/Makefile.protos
diff --git a/python/adapters/env.sh b/python/env.sh
similarity index 93%
rename from python/adapters/env.sh
rename to python/env.sh
index f4f9f97..ec3b52f 100644
--- a/python/adapters/env.sh
+++ b/python/env.sh
@@ -26,4 +26,4 @@
 . $VENVDIR/bin/activate
 
 # add top-level voltha dir to pythonpath
-export PYTHONPATH=$VOLTHA_BASE/$VENVDIR/lib/python2.7/site-packages:$PYTHONPATH:$VOLTHA_BASE:$VOLTHA_BASE/protos/third_party
+export PYTHONPATH=$VOLTHA_BASE/$VENVDIR/lib/python2.7/site-packages:$PYTHONPATH:$VOLTHA_BASE:$VOLTHA_BASE/cli:$VOLTHA_BASE/protos/third_party
diff --git a/python/adapters/protos/Makefile b/python/protos/Makefile
similarity index 100%
rename from python/adapters/protos/Makefile
rename to python/protos/Makefile
diff --git a/python/adapters/protos/__init__.py b/python/protos/__init__.py
similarity index 100%
rename from python/adapters/protos/__init__.py
rename to python/protos/__init__.py
diff --git a/python/adapters/protos/third_party/__init__.py b/python/protos/third_party/__init__.py
similarity index 96%
rename from python/adapters/protos/third_party/__init__.py
rename to python/protos/third_party/__init__.py
index 2740afe..1767870 100644
--- a/python/adapters/protos/third_party/__init__.py
+++ b/python/protos/third_party/__init__.py
@@ -38,7 +38,7 @@
     def load_module(self, name):
         if name in sys.modules:
             return sys.modules[name]
-        full_name = 'adapters.protos.third_party.' + name
+        full_name = 'python.protos.third_party.' + name
         import_module(full_name)
         module = sys.modules[full_name]
         sys.modules[name] = module
diff --git a/python/adapters/protos/third_party/google/LICENSE b/python/protos/third_party/google/LICENSE
similarity index 100%
rename from python/adapters/protos/third_party/google/LICENSE
rename to python/protos/third_party/google/LICENSE
diff --git a/python/adapters/protos/third_party/google/__init__.py b/python/protos/third_party/google/__init__.py
similarity index 100%
rename from python/adapters/protos/third_party/google/__init__.py
rename to python/protos/third_party/google/__init__.py
diff --git a/python/adapters/protos/third_party/google/api/__init__.py b/python/protos/third_party/google/api/__init__.py
similarity index 100%
rename from python/adapters/protos/third_party/google/api/__init__.py
rename to python/protos/third_party/google/api/__init__.py
diff --git a/python/adapters/protos/third_party/google/api/annotations.proto b/python/protos/third_party/google/api/annotations.proto
similarity index 100%
rename from python/adapters/protos/third_party/google/api/annotations.proto
rename to python/protos/third_party/google/api/annotations.proto
diff --git a/python/adapters/protos/third_party/google/api/http.proto b/python/protos/third_party/google/api/http.proto
similarity index 100%
rename from python/adapters/protos/third_party/google/api/http.proto
rename to python/protos/third_party/google/api/http.proto
diff --git a/rw_core/core/adapter_proxy.go b/rw_core/core/adapter_proxy.go
index 643c9de..ab35037 100644
--- a/rw_core/core/adapter_proxy.go
+++ b/rw_core/core/adapter_proxy.go
@@ -250,6 +250,33 @@
 	return nil, nil
 }
 
+func (ap *AdapterProxy) packetOut(deviceType string, deviceId string, outPort uint32, packet *openflow_13.OfpPacketOut) error {
+	log.Debugw("packetOut", log.Fields{"deviceId": deviceId})
+	topic := kafka.Topic{Name: deviceType}
+	rpc := "receive_packet_out"
+	dId := &ca.StrType{Val: deviceId}
+	args := make([]*kafka.KVArg, 3)
+	args[0] = &kafka.KVArg{
+		Key:   "deviceId",
+		Value: dId,
+	}
+	op := &ca.IntType{Val: int64(outPort)}
+	args[1] = &kafka.KVArg{
+		Key:   "outPort",
+		Value: op,
+	}
+	args[2] = &kafka.KVArg{
+		Key:   "packet",
+		Value: packet,
+	}
+
+	// TODO: Do we need to wait for an ACK on a packet out?
+	success, result := ap.kafkaProxy.InvokeRPC(nil, rpc, &topic, false, args...)
+	log.Debugw("packetOut", log.Fields{"deviceid": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
+}
+
+
 func (ap *AdapterProxy) UpdateFlowsBulk(device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups) error {
 	log.Debugw("UpdateFlowsBulk", log.Fields{"deviceId": device.Id})
 	topic := kafka.Topic{Name: device.Type}
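Note on the packetOut hunk above: the proxy packs each RPC argument as a typed key/value pair and dispatches it on a Kafka topic named after the device type. A minimal, self-contained sketch of that argument-building pattern; StrType, IntType, and KVArg here are stand-ins with assumed shapes, not the real generated ca/kafka types:

package main

import "fmt"

// Stand-ins for the generated ca.StrType/ca.IntType wrappers and the
// kafka.KVArg container (assumed shapes, not the real generated code).
type StrType struct{ Val string }
type IntType struct{ Val int64 }
type KVArg struct {
	Key   string
	Value interface{}
}

// buildPacketOutArgs assembles the three typed arguments the same way
// AdapterProxy.packetOut does before handing them to the Kafka proxy.
func buildPacketOutArgs(deviceId string, outPort uint32, packet []byte) []*KVArg {
	return []*KVArg{
		{Key: "deviceId", Value: &StrType{Val: deviceId}},
		{Key: "outPort", Value: &IntType{Val: int64(outPort)}},
		{Key: "packet", Value: packet},
	}
}

func main() {
	for _, arg := range buildPacketOutArgs("olt-1", 128, []byte{0xde, 0xad}) {
		fmt.Printf("%s -> %v\n", arg.Key, arg.Value)
	}
}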
diff --git a/rw_core/core/adapter_request_handler.go b/rw_core/core/adapter_request_handler.go
index 570b445..85e43be 100644
--- a/rw_core/core/adapter_request_handler.go
+++ b/rw_core/core/adapter_request_handler.go
@@ -457,3 +457,41 @@
 
 	return new(empty.Empty), nil
 }
+
+
+func (rhp *AdapterRequestHandlerProxy) PacketIn(args []*ca.Argument) (*empty.Empty, error) {
+	if len(args) < 3 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	deviceId := &voltha.ID{}
+	portNo := &ca.IntType{}
+	packet := &ca.Packet{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device_id":
+			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
+				log.Warnw("cannot-unmarshal-device-id", log.Fields{"error": err})
+				return nil, err
+			}
+		case "port":
+			if err := ptypes.UnmarshalAny(arg.Value, portNo); err != nil {
+				log.Warnw("cannot-unmarshal-port-no", log.Fields{"error": err})
+				return nil, err
+			}
+		case "packet":
+			if err := ptypes.UnmarshalAny(arg.Value, packet); err != nil {
+				log.Warnw("cannot-unmarshal-packet", log.Fields{"error": err})
+				return nil, err
+			}
+
+		}
+	}
+	log.Debugw("PacketIn", log.Fields{"deviceId": deviceId.Id, "port": portNo.Val,  "packet": packet})
+	if rhp.TestMode { // Execute only for test cases
+		return nil, nil
+	}
+	go rhp.deviceMgr.PacketIn(deviceId.Id, uint32(portNo.Val), packet.Payload)
+	return new(empty.Empty), nil
+}
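Note on the PacketIn handler above: each argument arrives as a protobuf Any and is unpacked by key into its expected concrete type. A runnable sketch of that round trip; wrappers.StringValue stands in here for the real voltha.ID message:

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/wrappers"
)

func main() {
	// Pack a concrete message into an Any, as the core does when it
	// ships RPC arguments over Kafka.
	packed, err := ptypes.MarshalAny(&wrappers.StringValue{Value: "device-42"})
	if err != nil {
		panic(err)
	}
	// Unpack it back into the expected concrete type, keyed by argument
	// name -- the same shape as the switch in PacketIn above.
	deviceId := &wrappers.StringValue{}
	if err := ptypes.UnmarshalAny(packed, deviceId); err != nil {
		panic(err)
	}
	fmt.Println("device_id:", deviceId.Value)
}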
diff --git a/rw_core/core/device_agent.go b/rw_core/core/device_agent.go
index e045fc9..92f00bf 100644
--- a/rw_core/core/device_agent.go
+++ b/rw_core/core/device_agent.go
@@ -33,6 +33,7 @@
 
 type DeviceAgent struct {
 	deviceId         string
+	deviceType       string
 	lastData         *voltha.Device
 	adapterProxy     *AdapterProxy
 	deviceMgr        *DeviceManager
@@ -60,6 +61,7 @@
 		cloned.Vlan = device.ProxyAddress.ChannelId
 	}
 	agent.deviceId = cloned.Id
+	agent.deviceType = cloned.Type
 	agent.lastData = cloned
 	agent.deviceMgr = deviceMgr
 	agent.exitChannel = make(chan int, 1)
@@ -368,6 +370,16 @@
 	}
 }
 
+func (agent *DeviceAgent) packetOut(outPort uint32, packet *ofp.OfpPacketOut) error {
+	// Send the packet to the adapter
+	if err := agent.adapterProxy.packetOut(agent.deviceType, agent.deviceId, outPort, packet); err != nil {
+		log.Debugw("packet-out-error", log.Fields{"id": agent.lastData.Id, "error": err})
+		return err
+	}
+	return nil
+}
+
+
 // TODO: implement when callback from the data model is ready
 // processUpdate is a callback invoked whenever there is a change on the device managed by this device agent
 func (agent *DeviceAgent) processUpdate(args ...interface{}) interface{} {
diff --git a/rw_core/core/device_manager.go b/rw_core/core/device_manager.go
index b2ab478..c4ac343 100644
--- a/rw_core/core/device_manager.go
+++ b/rw_core/core/device_manager.go
@@ -362,6 +362,34 @@
 	return nil
 }
 
+func (dMgr *DeviceManager) packetOut(deviceId string, outPort uint32, packet *ofp.OfpPacketOut) error {
+	log.Debugw("packetOut", log.Fields{"deviceId": deviceId, "outPort": outPort})
+	if agent := dMgr.getDeviceAgent(deviceId); agent != nil {
+		return agent.packetOut(outPort, packet)
+	}
+	return status.Errorf(codes.NotFound, "%s", deviceId)
+}
+
+func (dMgr *DeviceManager) PacketIn(deviceId string, port uint32, packet []byte) error {
+	log.Debugw("PacketIn", log.Fields{"deviceId": deviceId, "port": port})
+	// Get the logical device Id based on the deviceId
+	var device *voltha.Device
+	var err error
+	if device, err = dMgr.GetDevice(deviceId); err != nil {
+		log.Errorw("device-not-found", log.Fields{"deviceId": deviceId})
+		return err
+	}
+	if !device.Root {
+		log.Errorw("device-not-root", log.Fields{"deviceId": deviceId})
+		return status.Errorf(codes.FailedPrecondition, "%s", deviceId)
+	}
+
+	if err := dMgr.logicalDeviceMgr.packetIn(device.ParentId, port, packet); err != nil {
+		return err
+	}
+	return nil
+}
+
 func (dMgr *DeviceManager) createLogicalDevice(cDevice *voltha.Device) error {
 	log.Info("createLogicalDevice")
 	var logicalId *string
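Note on PacketIn above: the two failure modes map onto distinct gRPC status codes, NotFound for an unknown device and FailedPrecondition for a device that exists but is not a root device. A compact sketch of that convention; checkPacketInTarget is a hypothetical helper, not part of the patch:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// checkPacketInTarget mirrors the two failure modes of
// DeviceManager.PacketIn: an unknown device yields NotFound and a known
// but non-root device yields FailedPrecondition.
func checkPacketInTarget(deviceId string, known bool, root bool) error {
	if !known {
		return status.Errorf(codes.NotFound, "%s", deviceId)
	}
	if !root {
		return status.Errorf(codes.FailedPrecondition, "%s", deviceId)
	}
	return nil
}

func main() {
	err := checkPacketInTarget("onu-7", true, false)
	fmt.Println(status.Code(err), err)
}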
diff --git a/rw_core/core/logical_device_agent.go b/rw_core/core/logical_device_agent.go
index 5c9eced..4f53474 100644
--- a/rw_core/core/logical_device_agent.go
+++ b/rw_core/core/logical_device_agent.go
@@ -1023,3 +1023,19 @@
 	}
 	return nil
 }
+
+func (agent *LogicalDeviceAgent) packetOut(packet *ofp.OfpPacketOut) {
+	log.Debugw("packet-out", log.Fields{"inPort": packet.GetInPort()})
+	outPort := fd.GetPacketOutPort(packet)
+	//frame := packet.GetData()
+	// TODO: Use a channel between the logical agent and the device agent
+	agent.deviceMgr.packetOut(agent.rootDeviceId, outPort, packet)
+}
+
+
+func (agent *LogicalDeviceAgent) packetIn(port uint32, packet []byte) {
+	log.Debugw("packet-in", log.Fields{"port": port, "packet": packet})
+	packetIn := fd.MkPacketIn(port, packet)
+	log.Debugw("sending-packet-in", log.Fields{"packet-in": packetIn})
+}
+
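Note on the logical-device packetOut above: port selection is delegated to fd.GetPacketOutPort, which scans the packet-out's action list for the first OUTPUT action (see the flow decomposer changes below). A self-contained sketch of that scan; the OutputAction/Action types are stand-ins with assumed shapes, not the generated ofp code:

package main

import "fmt"

// Stand-ins for the OpenFlow action types consulted by
// fd.GetPacketOutPort (assumed shapes only, not the generated ofp code).
type OutputAction struct{ Port uint32 }
type Action struct {
	Type   string
	Output *OutputAction
}

// packetOutPort returns the port of the first OUTPUT action, or 0 when
// none is present -- the same scan GetPacketOutPort performs below.
func packetOutPort(actions []*Action) uint32 {
	for _, a := range actions {
		if a.Type == "OUTPUT" && a.Output != nil {
			return a.Output.Port
		}
	}
	return 0
}

func main() {
	actions := []*Action{{Type: "OUTPUT", Output: &OutputAction{Port: 65533}}}
	fmt.Println(packetOutPort(actions))
}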
diff --git a/rw_core/core/logical_device_manager.go b/rw_core/core/logical_device_manager.go
index 9d365aa..4625518 100644
--- a/rw_core/core/logical_device_manager.go
+++ b/rw_core/core/logical_device_manager.go
@@ -324,3 +324,23 @@
 	}
 	sendAPIResponse(ctx, ch, res)
 }
+
+func (ldMgr *LogicalDeviceManager) packetOut(packetOut *openflow_13.PacketOut) {
+	log.Debugw("packetOut", log.Fields{"logicalDeviceId": packetOut.Id})
+	if agent := ldMgr.getLogicalDeviceAgent(packetOut.Id); agent != nil {
+		agent.packetOut(packetOut.PacketOut)
+	} else {
+		log.Error("logical-device-not-exist", log.Fields{"logicalDeviceId": packetOut.Id})
+	}
+}
+
+func (ldMgr *LogicalDeviceManager) packetIn(logicalDeviceId string, port uint32, packet []byte) error {
+	log.Debugw("packetIn", log.Fields{"logicalDeviceId": logicalDeviceId, "port": port})
+	if agent := ldMgr.getLogicalDeviceAgent(logicalDeviceId); agent != nil {
+		agent.packetIn(port, packet)
+	} else {
+		log.Error("logical-device-not-exist", log.Fields{"logicalDeviceId": logicalDeviceId})
+	}
+	return nil
+}
+
diff --git a/rw_core/flow_decomposition/flow_decomposer.go b/rw_core/flow_decomposition/flow_decomposer.go
index 284bef2..bd0e591 100644
--- a/rw_core/flow_decomposition/flow_decomposer.go
+++ b/rw_core/flow_decomposition/flow_decomposer.go
@@ -407,6 +407,18 @@
 	}
 }
 
+func GetPacketOutPort(packet *ofp.OfpPacketOut) uint32 {
+	if packet == nil {
+		return 0
+	}
+	for _, action := range packet.GetActions() {
+		if action.Type == OUTPUT {
+			return action.GetOutput().Port
+		}
+	}
+	return 0
+}
+
 func GetOutPort(flow *ofp.OfpFlowStats) uint32 {
 	if flow == nil {
 		return 0
@@ -696,6 +708,24 @@
 	return mod
 }
 
+func MkPacketIn(port uint32, packet []byte) *ofp.OfpPacketIn {
+	packetIn := &ofp.OfpPacketIn{
+		Reason: ofp.OfpPacketInReason_OFPR_ACTION,
+		Match: &ofp.OfpMatch{
+			Type: ofp.OfpMatchType_OFPMT_OXM,
+			OxmFields: []*ofp.OfpOxmField{
+				{
+					OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC,
+					Field: &ofp.OfpOxmField_OfbField{
+						OfbField: InPort(port)},
+				},
+			},
+		},
+		Data: packet,
+	}
+	return packetIn
+}
+
 // MkFlowStat is a helper method to build flows
 func MkFlowStat(fa *fu.FlowArgs) *ofp.OfpFlowStats {
 	//Build the matchfields