This commit cleans up the python directory to ensure the adapters
and the CLI run properly.

Change-Id: Ic68a3ecd1f16a5af44296e3c020c808b185f4c18
diff --git a/python/adapters/Makefile b/python/adapters/Makefile
deleted file mode 100644
index 2531985..0000000
--- a/python/adapters/Makefile
+++ /dev/null
@@ -1,226 +0,0 @@
-#
-# Copyright 2018 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-ifneq ($(VOLTHA_BUILD),docker)
-ifeq ($(VOLTHA_BASE)_set,_set)
-$(error To get started, please source the env.sh file)
-endif
-endif
-
-ifeq ($(TAG),)
-TAG := latest
-endif
-
-ifeq ($(TARGET_TAG),)
-TARGET_TAG := latest
-endif
-
-# If no DOCKER_HOST_IP is specified grab a v4 IP address associated with
-# the default gateway
-ifeq ($(DOCKER_HOST_IP),)
-DOCKER_HOST_IP := $(shell ifconfig $$(netstat -rn | grep -E '^(default|0.0.0.0)' | head -1 | awk '{print $$NF}') | grep inet | awk '{print $$2}' | sed -e 's/addr://g')
-endif
-
-ifneq ($(http_proxy)$(https_proxy),)
-# Include proxies from the environment
-DOCKER_PROXY_ARGS = \
-       --build-arg http_proxy=$(http_proxy) \
-       --build-arg https_proxy=$(https_proxy) \
-       --build-arg ftp_proxy=$(ftp_proxy) \
-       --build-arg no_proxy=$(no_proxy) \
-       --build-arg HTTP_PROXY=$(HTTP_PROXY) \
-       --build-arg HTTPS_PROXY=$(HTTPS_PROXY) \
-       --build-arg FTP_PROXY=$(FTP_PROXY) \
-       --build-arg NO_PROXY=$(NO_PROXY)
-endif
-
-DOCKER_BUILD_ARGS = \
-	--build-arg TAG=$(TAG) \
-	--build-arg REGISTRY=$(REGISTRY) \
-	--build-arg REPOSITORY=$(REPOSITORY) \
-	$(DOCKER_PROXY_ARGS) $(DOCKER_CACHE_ARG) \
-	 --rm --force-rm \
-	$(DOCKER_BUILD_EXTRA_ARGS)
-
-VENVDIR := venv-$(shell uname -s | tr '[:upper:]' '[:lower:]')
-
-DOCKER_IMAGE_LIST = \
-	base \
-	protoc \
-	protos \
-	adapter-ponsim-olt \
-	adapter-ponsim-onu \
-
-# The following list was scavanged from the compose / stack files as well as
-# from the Dockerfiles. If nothing else it highlights that VOLTHA is not
-# using consistent versions for some of the containers.
-
-FETCH_BUILD_IMAGE_LIST = \
-       alpine:3.6 \
-       centos:7 \
-       centurylink/ca-certs:latest \
-       grpc/python:latest \
-       ubuntu:xenial
-
-FETCH_COMPOSE_IMAGE_LIST = \
-        wurstmeister/kafka:latest \
-        wurstmeister/zookeeper:latest
-
-# find k8s -type f | xargs grep image: | awk '{print $NF}' | sed -e 's/\"//g' | sed '/:.*$/!s/$/:latest/g' | sort -u | sed -e 's/^/       /g' -e 's/$/ \\/g'
-# Manually remove some image from this list as they don't reflect the new 
-# naming conventions for the VOLTHA build
-FETCH_K8S_IMAGE_LIST = \
-       wurstmeister/kafka:1.0.0 \
-       zookeeper:3.4.11
-
-FETCH_IMAGE_LIST = $(shell echo $(FETCH_BUILD_IMAGE_LIST) $(FETCH_COMPOSE_IMAGE_LIST) $(FETCH_K8S_IMAGE_LIST) | tr ' ' '\n' | sort -u)
-
-.PHONY: $(DIRS) $(DIRS_CLEAN) $(DIRS_FLAKE8) flake8 base ponsim_olt ponsim_onu protos kafka common start stop tag push pull
-
-# This should to be the first and default target in this Makefile
-help:
-	@echo "Usage: make [<target>]"
-	@echo "where available targets are:"
-	@echo
-	@echo "build        : Build the Adapters protos and docker images.\n\
-               If this is the first time you are building, choose \"make build\" option."
-	@echo "clean        : Remove files created by the build and tests"
-	@echo "distclean    : Remove venv directory"
-	@echo "fetch        : Pre-fetch artifacts for subsequent local builds"
-	@echo "help         : Print this help"
-	@echo "protoc       : Build a container with protoc installed"
-	@echo "protos       : Compile all grpc/protobuf files"
-	@echo "rebuild-venv : Rebuild local Python virtualenv from scratch"
-	@echo "venv         : Build local Python virtualenv if did not exist yet"
-	@echo "containers   : Build all the docker containers"
-	@echo "base         : Build the base docker container used by all other dockers"
-	@echo "adapter_ponsim_olt       : Build the ponsim olt adapter docker container"
-	@echo "adapter_ponsim_onu       : Build the ponsim olt adapter docker container"
-	@echo "tag          : Tag a set of images"
-	@echo "push         : Push the docker images to an external repository"
-	@echo "pull         : Pull the docker images from a repository"
-	@echo
-
-## New directories can be added here
-#DIRS:=
-
-## If one directory depends on another directory that
-## dependency can be expressed here
-##
-## For example, if the Tibit directory depended on the eoam
-## directory being built first, then that can be expressed here.
-##  driver/tibit: eoam
-
-# Parallel Build
-$(DIRS):
-	@echo "    MK $@"
-	$(Q)$(MAKE) -C $@
-
-# Parallel Clean
-DIRS_CLEAN = $(addsuffix .clean,$(DIRS))
-$(DIRS_CLEAN):
-	@echo "    CLEAN $(basename $@)"
-	$(Q)$(MAKE) -C $(basename $@) clean
-
-# Parallel Flake8
-DIRS_FLAKE8 = $(addsuffix .flake8,$(DIRS))
-$(DIRS_FLAKE8):
-	@echo "    FLAKE8 $(basename $@)"
-	-$(Q)$(MAKE) -C $(basename $@) flake8
-
-build: protoc protos containers
-
-containers: base adapter_ponsim_olt adapter_ponsim_onu
-
-base:
-	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-base:${TAG} -f docker/Dockerfile.base .
-
-adapter_ponsim_olt:
-	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-adapter-ponsim-olt:${TAG} -f docker/Dockerfile.adapter_ponsim_olt .
-
-adapter_ponsim_onu:
-	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-adapter-ponsim-onu:${TAG} -f docker/Dockerfile.adapter_ponsim_onu .
-
-tag: $(patsubst  %,%.tag,$(DOCKER_IMAGE_LIST))
-
-push: tag $(patsubst  %,%.push,$(DOCKER_IMAGE_LIST))
-
-pull: $(patsubst  %,%.pull,$(DOCKER_IMAGE_LIST))
-
-%.tag:
-	docker tag ${REGISTRY}${REPOSITORY}voltha-$(subst .tag,,$@):${TAG} ${TARGET_REGISTRY}${TARGET_REPOSITORY}voltha-$(subst .tag,,$@):${TARGET_TAG}
-
-%.push:
-	docker push ${TARGET_REGISTRY}${TARGET_REPOSITORY}voltha-$(subst .push,,$@):${TARGET_TAG}
-
-%.pull:
-	docker pull ${REGISTRY}${REPOSITORY}voltha-$(subst .pull,,$@):${TAG}
-
-protoc:
-ifeq ($(VOLTHA_BUILD),docker)
-	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-protoc:${TAG} -f docker/Dockerfile.protoc .
-endif
-
-protos:
-ifneq ($(VOLTHA_BUILD),docker)
-	make -C protos
-else
-	cp ../protos/*.proto ./protos
-	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} -f docker/Dockerfile.protos .
-endif
-
-install-protoc:
-	make -C voltha/protos install-protoc
-
-clean:
-	find voltha -name '*.pyc' | xargs rm -f
-
-distclean: clean
-	rm -rf ${VENVDIR}
-
-fetch:
-	@bash -c ' \
-		for i in $(FETCH_IMAGE_LIST); do \
-			docker pull $$i; \
-		done'
-
-purge-venv:
-	rm -fr ${VENVDIR}
-
-rebuild-venv: purge-venv venv
-
-ifneq ($(VOLTHA_BUILD),docker)
-venv: ${VENVDIR}/.built
-else
-venv:
-endif
-
-${VENVDIR}/.built:
-	@ virtualenv ${VENVDIR}
-	@ . ${VENVDIR}/bin/activate && \
-	    pip install --upgrade pip; \
-	    if ! pip install -r requirements.txt; \
-	    then \
-	        echo "On MAC OS X, if the installation failed with an error \n'<openssl/opensslv.h>': file not found,"; \
-	        echo "see the BUILD.md file for a workaround"; \
-	    else \
-	        uname -s > ${VENVDIR}/.built; \
-	    fi
-
-
-flake8: $(DIRS_FLAKE8)
-
-# end file
diff --git a/python/adapters/common/__init__.py b/python/adapters/common/__init__.py
index b0fb0b2..58aca1e 100644
--- a/python/adapters/common/__init__.py
+++ b/python/adapters/common/__init__.py
@@ -1,10 +1,10 @@
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018 the original author or authors.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-# http://www.apache.org/licenses/LICENSE-2.0
+#      http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/python/adapters/common/event_bus.py b/python/adapters/common/event_bus.py
deleted file mode 100644
index e717c16..0000000
--- a/python/adapters/common/event_bus.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-A simple internal pub/sub event bus with topics and filter-based registration.
-"""
-import re
-
-import structlog
-
-
-log = structlog.get_logger()
-
-
-class _Subscription(object):
-
-    __slots__ = ('bus', 'predicate', 'callback', 'topic')
-    def __init__(self, bus, predicate, callback, topic=None):
-        self.bus = bus
-        self.predicate = predicate
-        self.callback = callback
-        self.topic = topic
-
-
-class EventBus(object):
-
-    def __init__(self):
-        self.subscriptions = {}  # topic -> list of _Subscription objects
-                                 # topic None holds regexp based topic subs.
-        self.subs_topic_map = {} # to aid fast lookup when unsubscribing
-
-    def list_subscribers(self, topic=None):
-        if topic is None:
-            return sum(self.subscriptions.itervalues(), [])
-        else:
-            if topic in self.subscriptions:
-                return self.subscriptions[topic]
-            else:
-                return []
-
-    @staticmethod
-    def _get_topic_key(topic):
-        if isinstance(topic, str):
-            return topic
-        elif hasattr(topic, 'match'):
-            return None
-        else:
-            raise AttributeError('topic not a string nor a compiled regex')
-
-    def subscribe(self, topic, callback, predicate=None):
-        """
-        Subscribe to given topic with predicate and register the callback
-        :param topic: String topic (explicit) or regexp based topic filter.
-        :param callback: Callback method with signature def func(topic, msg)
-        :param predicate: Optional method/function signature def predicate(msg)
-        :return: Subscription object which can be used to unsubscribe
-        """
-        subscription = _Subscription(self, predicate, callback, topic)
-        topic_key = self._get_topic_key(topic)
-        self.subscriptions.setdefault(topic_key, []).append(subscription)
-        self.subs_topic_map[subscription] = topic_key
-        return subscription
-
-    def unsubscribe(self, subscription):
-        """
-        Remove given subscription
-        :param subscription: subscription object as was returned by subscribe
-        :return: None
-        """
-        topic_key = self.subs_topic_map[subscription]
-        self.subscriptions[topic_key].remove(subscription)
-
-    def publish(self, topic, msg):
-        """
-        Publish given message to all subscribers registered with topic taking
-        the predicate functions into account.
-        :param topic: String topic
-        :param msg: Arbitrary python data as message
-        :return: None
-        """
-        from copy import copy
-
-        def passes(msg, predicate):
-            try:
-                return predicate(msg)
-            except Exception, e:
-                return False  # failed predicate function treated as no match
-
-        # lookup subscribers with explicit topic subscriptions
-        subscribers = self.subscriptions.get(topic, [])
-
-        # add matching regexp topic subscribers
-        subscribers.extend(s for s in self.subscriptions.get(None, [])
-                           if s.topic.match(topic))
-
-        # iterate over a shallow-copy of subscribers
-        for candidate in copy(subscribers):
-            predicate = candidate.predicate
-            if predicate is None or passes(msg, predicate):
-                try:
-                    candidate.callback(topic, msg)
-                except Exception, e:
-                    log.exception('callback-failed', e=repr(e), topic=topic)
-
-
-
-default_bus = EventBus()
-
-
-class EventBusClient(object):
-    """
-    Primary interface to the EventBus. Usage:
-
-    Publish:
-    >>> events = EventBusClient()
-    >>> msg = dict(a=1, b='foo')
-    >>> events.publish('a.topic', msg)
-
-    Subscribe to get all messages on specific topic:
-    >>> def got_event(topic, msg):
-    >>>     print topic, ':', msg
-    >>> events = EventBusClient()
-    >>> events.subscribe('a.topic', got_event)
-
-    Subscribe to get messages matching predicate on specific topic:
-    >>> def got_event(topic, msg):
-    >>>     print topic, ':', msg
-    >>> events = EventBusClient()
-    >>> events.subscribe('a.topic', got_event, lambda msg: msg.len() < 100)
-
-    Use a DeferredQueue to buffer incoming messages
-    >>> queue = DeferredQueue()
-    >>> events = EventBusClient()
-    >>> events.subscribe('a.topic', lambda _, msg: queue.put(msg))
-
-    """
-    def __init__(self, bus=None):
-        """
-        Obtain a client interface for the pub/sub event bus.
-        :param bus: An optional specific event bus. Inteded for mainly test
-        use. If not provided, the process default bus will be used, which is
-        the preferred use (a process shall not need more than one bus).
-        """
-        self.bus = bus or default_bus
-
-    def publish(self, topic, msg):
-        """
-        Publish given msg to given topic.
-        :param topic: String topic
-        :param msg: Arbitrary python data as message
-        :return: None
-        """
-        self.bus.publish(topic, msg)
-
-    def subscribe(self, topic, callback, predicate=None):
-        """
-        Subscribe to given topic with predicate and register the callback
-        :param topic: String topic (explicit) or regexp based topic filter.
-        :param callback: Callback method with signature def func(topic, msg)
-        :param predicate: Optional method/function with signature
-        def predicate(msg)
-        :return: Subscription object which can be used to unsubscribe
-        """
-        return self.bus.subscribe(topic, callback, predicate)
-
-    def unsubscribe(self, subscription):
-        """
-        Remove given subscription
-        :param subscription: subscription object as was returned by subscribe
-        :return: None
-        """
-        return self.bus.unsubscribe(subscription)
-
-    def list_subscribers(self, topic=None):
-        """
-        Return list of subscribers. If topci is provided, it is filtered for
-        those subscribing to the topic.
-        :param topic: Optional topic
-        :return: List of subscriptions
-        """
-        return self.bus.list_subscribers(topic)
diff --git a/python/adapters/common/frameio/frameio.py b/python/adapters/common/frameio/frameio.py
index 2f68ef8..0657257 100644
--- a/python/adapters/common/frameio/frameio.py
+++ b/python/adapters/common/frameio/frameio.py
@@ -40,10 +40,10 @@
 from twisted.internet import reactor
 from zope.interface import implementer
 
-from adapters.common.utils.registry import IComponent
+from python.common.utils.registry import IComponent
 
 if sys.platform.startswith('linux'):
-    from adapters.common.frameio.third_party.oftest import afpacket, netutils
+    from third_party.oftest import afpacket, netutils
 elif sys.platform == 'darwin':
     from scapy.arch import pcapdnet, BIOCIMMEDIATE, dnet
 
diff --git a/python/adapters/common/openflow/__init__.py b/python/adapters/common/kvstore/__init__.py
similarity index 91%
copy from python/adapters/common/openflow/__init__.py
copy to python/adapters/common/kvstore/__init__.py
index b0fb0b2..4a82628 100644
--- a/python/adapters/common/openflow/__init__.py
+++ b/python/adapters/common/kvstore/__init__.py
@@ -1,4 +1,4 @@
-# Copyright 2017-present Open Networking Foundation
+# Copyright 2018-present Open Networking Foundation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/python/adapters/common/kvstore/consul_client.py b/python/adapters/common/kvstore/consul_client.py
new file mode 100644
index 0000000..789e797
--- /dev/null
+++ b/python/adapters/common/kvstore/consul_client.py
@@ -0,0 +1,304 @@
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from kv_client import DEFAULT_TIMEOUT, Event, KVClient, KVPair, RETRY_BACKOFF
+from python.common.utils.asleep import asleep
+from python.common.utils.deferred_utils import DeferredWithTimeout, TimeOutError
+from consul import ConsulException
+from consul.twisted import Consul
+from structlog import get_logger
+from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
+
+log = get_logger()
+
+class ConsulClient(KVClient):
+
+    def __init__(self, kv_host, kv_port):
+        KVClient.__init__(self, kv_host, kv_port)
+        self.session_id = None
+        self.client = Consul(kv_host, kv_port)
+
+    def watch(self, key, key_change_callback, timeout=DEFAULT_TIMEOUT):
+        self._retriggering_watch(key, key_change_callback, timeout)
+
+    @inlineCallbacks
+    def _retriggering_watch(self, key, key_change_callback, timeout):
+        self.key_watches[key] = ConsulWatch(self.client, key, key_change_callback, timeout)
+        yield self.key_watches[key].start()
+
+    def close_watch(self, key, timeout=DEFAULT_TIMEOUT):
+        if key in self.key_watches:
+            self.key_watches[key].stop()
+
+    @inlineCallbacks
+    def _op_with_retry(self, operation, key, value, timeout, *args, **kw):
+        log.debug('kv-op', operation=operation, key=key, timeout=timeout, args=args, kw=kw)
+        err = None
+        result = None
+        while True:
+            try:
+                if operation == 'GET':
+                    result = yield self._get(key, **kw)
+                elif operation == 'LIST':
+                    result, err = yield self._list(key)
+                elif operation == 'PUT':
+                    # Put returns a boolean response
+                    result = yield self.client.kv.put(key, value)
+                    if not result:
+                        err = 'put-failed'
+                elif operation == 'DELETE':
+                    # Delete returns a boolean response
+                    result = yield self.client.kv.delete(key)
+                    if not result:
+                        err = 'delete-failed'
+                elif operation == 'RESERVE':
+                    result, err = yield self._reserve(key, value, **kw)
+                elif operation == 'RENEW':
+                    result, err = yield self._renew_reservation(key)
+                elif operation == 'RELEASE':
+                    result, err = yield self._release_reservation(key)
+                elif operation == 'RELEASE-ALL':
+                    err = yield self._release_all_reservations()
+                self._clear_backoff()
+                break
+            except ConsulException as ex:
+                if 'ConnectionRefusedError' in ex.message:
+                    log.exception('comms-exception', ex=ex)
+                    yield self._backoff('consul-not-up')
+                else:
+                    log.error('consul-specific-exception', ex=ex)
+                    err = ex
+            except Exception as ex:
+                log.error('consul-exception', ex=ex)
+                err = ex
+
+            if timeout > 0 and self.retry_time > timeout:
+                err = 'operation-timed-out'
+            if err is not None:
+                self._clear_backoff()
+                break
+
+        returnValue((result,err))
+
+    @inlineCallbacks
+    def _get(self, key, **kw):
+        kvp = None
+        index, rec = yield self.client.kv.get(key, **kw)
+        if rec is not None:
+            kvp = KVPair(rec['Key'], rec['Value'], index)
+        returnValue(kvp)
+
+    @inlineCallbacks
+    def _list(self, key):
+        err = None
+        kv_pairs = []
+        index, recs = yield self.client.kv.get(key, recurse=True)
+        # recs is None when no key matches the prefix
+        for rec in recs or []:
+            kv_pairs.append(KVPair(rec['Key'], rec['Value'], rec['ModifyIndex']))
+        returnValue((kv_pairs, err))
+
+    @inlineCallbacks
+    def _reserve(self, key, value, **kw):
+        ttl = kw.get('ttl')
+        reserved = False
+        err = 'reservation-failed'
+        owner = None
+
+        # Create a session
+        self.session_id = yield self.client.session.create(behavior='delete',
+                                                           ttl=ttl) # lock_delay=1)
+        log.debug('create-session', id=self.session_id)
+        # Try to acquire the key
+        result = yield self.client.kv.put(key, value, acquire=self.session_id)
+        log.debug('key-acquire', key=key, value=value, sess=self.session_id, result=result)
+
+        # Check if reservation succeeded
+        index, record = yield self.client.kv.get(key)
+        if record is not None and 'Value' in record:
+            owner = record['Value']
+            log.debug('get-key', session=record['Session'], owner=owner)
+            if record['Session'] == self.session_id and owner == value:
+                reserved = True
+                log.debug('key-reserved', key=key, value=value, ttl=ttl)
+                # Add key to reservation list
+                self.key_reservations[key] = self.session_id
+            else:
+                log.debug('reservation-held-by-another', owner=owner)
+
+        if reserved:
+            err = None
+        returnValue((owner, err))
+
+    @inlineCallbacks
+    def _renew_reservation(self, key):
+        result = None
+        err = None
+        if key not in self.key_reservations:
+            err = 'key-not-reserved'
+        else:
+            session_id = self.key_reservations[key]
+            # A successfully renewed session returns an object with fields:
+            # Node, CreateIndex, Name, ModifyIndex, ID, Behavior, TTL,
+            # LockDelay, and Checks
+            result = yield self.client.session.renew(session_id=session_id)
+            log.debug('session-renew', result=result)
+        if result is None:
+            err = 'session-renewal-failed'
+        returnValue((result, err))
+
+    @inlineCallbacks
+    def _release_reservation(self, key):
+        success = None
+        err = None
+        if key not in self.key_reservations:
+            err = 'key-not-reserved'
+        else:
+            session_id = self.key_reservations[key]
+            # A successfully destroyed session returns a boolean result
+            success = yield self.client.session.destroy(session_id)
+            log.debug('session-destroy', result=success)
+            if not success:
+                err = 'session-destroy-failed'
+            self.session_id = None
+            self.key_reservations.pop(key)
+        returnValue((success, err))
+
+    @inlineCallbacks
+    def _release_all_reservations(self):
+        err = None
+        keys_to_delete = []
+        for key in self.key_reservations:
+            session_id = self.key_reservations[key]
+            # A successfully destroyed session returns a boolean result
+            success = yield self.client.session.destroy(session_id)
+            if not success:
+                err = 'session-destroy-failed'
+                log.debug('session-destroy', id=session_id, result=success)
+            self.session_id = None
+            keys_to_delete.append(key)
+        for key in keys_to_delete:
+            self.key_reservations.pop(key)
+        returnValue(err)
+
+
+class ConsulWatch():
+
+    def __init__(self, consul, key, callback, timeout):
+        self.client = consul
+        self.key = key
+        self.index = None
+        self.callback = callback
+        self.timeout = timeout
+        self.period = 60
+        self.running = True
+        self.retries = 0
+        self.retry_time = 0
+
+    @inlineCallbacks
+    def start(self):
+        self.running = True
+        index, rec = yield self._get_with_retry(self.key, None,
+                                              timeout=self.timeout)
+        self.index = str(index)
+
+        @inlineCallbacks
+        def _get(key, deferred):
+            try:
+                index, rec = yield self._get_with_retry(key, None,
+                                                     timeout=self.timeout,
+                                                     index=self.index)
+                self.index = str(index)
+                if not deferred.called:
+                    log.debug('got-result-firing-deferred')
+                    deferred.callback((self.index, rec))
+            except Exception as e:
+                log.exception('got-exception', e=e)
+
+        while self.running:
+            try:
+                rcvd = DeferredWithTimeout(timeout=self.period)
+                _get(self.key, rcvd)
+                try:
+                    # Update index for next watch iteration
+                    index, rec = yield rcvd
+                    log.debug('event-received', index=index, rec=rec)
+                    # Notify client of key change event
+                    if rec is None:
+                        # Key has been deleted
+                        self._send_event(Event(Event.DELETE, self.key, None))
+                    else:
+                        self._send_event(Event(Event.PUT, rec['Key'], rec['Value']))
+                except TimeOutError as e:
+                    log.debug('no-events-over-watch-period', key=self.key)
+                except Exception as e:
+                    log.exception('exception', e=e)
+            except Exception as e:
+                log.exception('exception', e=e)
+
+        log.debug('close-watch', key=self.key)
+
+    def stop(self):
+        self.running = False
+        self.callback = None
+
+    @inlineCallbacks
+    def _get_with_retry(self, key, value, timeout, *args, **kw):
+        log.debug('watch-period', key=key, period=self.period, timeout=timeout, args=args, kw=kw)
+        err = None
+        result = None
+        while True:
+            try:
+                result = yield self.client.kv.get(key, **kw)
+                self._clear_backoff()
+                break
+            except ConsulException as ex:
+                err = ex
+                if 'ConnectionRefusedError' in ex.message:
+                    self._send_event(Event(Event.CONNECTION_DOWN, self.key, None))
+                    log.exception('comms-exception', ex=ex)
+                    yield self._backoff('consul-not-up')
+                else:
+                    log.error('consul-specific-exception', ex=ex)
+            except Exception as ex:
+                err = ex
+                log.error('consul-exception', ex=ex)
+
+            if timeout > 0 and self.retry_time > timeout:
+                err = 'operation-timed-out'
+            if err is not None:
+                self._clear_backoff()
+                break
+
+        returnValue(result)
+
+    def _send_event(self, event):
+        if self.callback is not None:
+            self.callback(event)
+
+    def _backoff(self, msg):
+        wait_time = RETRY_BACKOFF[min(self.retries, len(RETRY_BACKOFF) - 1)]
+        self.retry_time += wait_time
+        self.retries += 1
+        log.error(msg, next_retry_in_secs=wait_time,
+                  total_delay_in_secs = self.retry_time,
+                  retries=self.retries)
+        return asleep(wait_time)
+
+    def _clear_backoff(self):
+        if self.retries:
+            log.debug('reconnected-to-kv', after_retries=self.retries)
+            self.retries = 0
+            self.retry_time = 0
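For reference, the reservation flow implemented above is driven through the
public KVClient API (reserve, renew_reservation, release_reservation, defined
in kv_client.py below). A minimal usage sketch, assuming a Consul agent on
localhost:8500 and the module importable under the (assumed) path shown:

    # Minimal sketch, not part of this change: assumes Consul at localhost:8500
    # and that the kvstore package is importable under this assumed path.
    from twisted.internet import reactor
    from twisted.internet.defer import inlineCallbacks
    from python.adapters.common.kvstore.consul_client import ConsulClient

    @inlineCallbacks
    def reservation_demo():
        client = ConsulClient('localhost', 8500)
        # reserve() resolves to (owner_value, err); err is None on success
        owner, err = yield client.reserve('voltha/leader', 'core-1', ttl=10)
        if err is None and owner == 'core-1':
            # extend the reservation before its TTL lapses ...
            err = yield client.renew_reservation('voltha/leader')
            # ... and give it up when done
            err = yield client.release_reservation('voltha/leader')
        reactor.stop()

    reactor.callWhenRunning(reservation_demo)
    reactor.run()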
diff --git a/python/adapters/common/kvstore/etcd_client.py b/python/adapters/common/kvstore/etcd_client.py
new file mode 100644
index 0000000..e1850e7
--- /dev/null
+++ b/python/adapters/common/kvstore/etcd_client.py
@@ -0,0 +1,240 @@
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+################################################################################
+#
+# Most of the txaioetcd methods provide a timeout parameter. This parameter
+# is likely intended to limit the amount of time spent by any one method
+# waiting for a response from the etcd server. However, if the server is
+# down, the method immediately throws a ConnectionRefusedError exception;
+# it does not perform any retries. The timeout parameter provided by the
+# methods in EtcdClient cover this contingency.
+#
+################################################################################
+
+from kv_client import DEFAULT_TIMEOUT, Event, KVClient, KVPair
+from structlog import get_logger
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue, Deferred
+from twisted.internet.error import ConnectionRefusedError
+from txaioetcd import Client, CompVersion, Failed, KeySet, OpGet, OpSet, Transaction
+
+log = get_logger()
+
+class EtcdClient(KVClient):
+
+    def __init__(self, kv_host, kv_port):
+        KVClient.__init__(self, kv_host, kv_port)
+        self.url = u'http://' + kv_host + u':' + str(kv_port)
+        self.client = Client(reactor, self.url)
+
+    @inlineCallbacks
+    def watch(self, key, key_change_callback, timeout=DEFAULT_TIMEOUT):
+        self.key_watches[key] = key_change_callback
+        result = yield self._op_with_retry('WATCH', key, None, timeout, callback=self.key_changed)
+        returnValue(result)
+
+    def key_changed(self, kv):
+        key = kv.key
+        value = kv.value
+        log.debug('key-changed', key=key, value=value)
+        # Notify client of key change event
+        if value is not None:
+            evt = Event(Event.PUT, key, value)
+        else:
+            evt = Event(Event.DELETE, key, None)
+        if key in self.key_watches:
+            self.key_watches[key](evt)
+
+    def close_watch(self, key, timeout=DEFAULT_TIMEOUT):
+        log.debug('close-watch', key=key)
+        if key in self.key_watches:
+            self.key_watches.pop(key)
+
+    @inlineCallbacks
+    def _op_with_retry(self, operation, key, value, timeout, *args, **kw):
+        log.debug('kv-op', operation=operation, key=key, timeout=timeout, args=args, kw=kw)
+        err = None
+        result = None
+        if isinstance(key, str):
+            key = bytes(key)
+        if value is not None:
+            value = bytes(value)
+        while True:
+            try:
+                if operation == 'GET':
+                    result = yield self._get(key)
+                elif operation == 'LIST':
+                    result, err = yield self._list(key)
+                elif operation == 'PUT':
+                    # Put returns an object of type Revision
+                    result = yield self.client.set(key, value, **kw)
+                elif operation == 'DELETE':
+                    # Delete returns an object of type Deleted
+                    result = yield self.client.delete(key)
+                elif operation == 'RESERVE':
+                    result, err = yield self._reserve(key, value, **kw)
+                elif operation == 'RENEW':
+                    result, err = yield self._renew_reservation(key)
+                elif operation == 'RELEASE':
+                    result, err = yield self._release_reservation(key)
+                elif operation == 'RELEASE-ALL':
+                    err = yield self._release_all_reservations()
+                elif operation == 'WATCH':
+                    callback = kw.get('callback')
+                    result = self.client.watch([KeySet(key, prefix=True)], callback)
+                self._clear_backoff()
+                break
+            except ConnectionRefusedError as ex:
+                log.error('comms-exception', ex=ex)
+                yield self._backoff('etcd-not-up')
+            except Exception as ex:
+                log.error('etcd-exception', ex=ex)
+                err = ex
+
+            if timeout > 0 and self.retry_time > timeout:
+                err = 'operation-timed-out'
+            if err is not None:
+                self._clear_backoff()
+                break
+
+        returnValue((result, err))
+
+    @inlineCallbacks
+    def _get(self, key):
+        kvp = None
+        resp = yield self.client.get(key)
+        if resp.kvs is not None and len(resp.kvs) == 1:
+            kv = resp.kvs[0]
+            kvp = KVPair(kv.key, kv.value, kv.mod_revision)
+        returnValue(kvp)
+
+    @inlineCallbacks
+    def _list(self, key):
+        err = None
+        kv_pairs = []
+        resp = yield self.client.get(KeySet(key, prefix=True))
+        if resp.kvs is not None and len(resp.kvs) > 0:
+            for kv in resp.kvs:
+                kv_pairs.append(KVPair(kv.key, kv.value, kv.mod_revision))
+        returnValue((kv_pairs, err))
+
+    @inlineCallbacks
+    def _reserve(self, key, value, **kw):
+        ttl = kw.get('ttl')
+        reserved = False
+        err = 'reservation-failed'
+        owner = None
+
+        # Create a lease
+        lease = yield self.client.lease(ttl)
+
+        # Create a transaction
+        txn = Transaction(
+            compare=[ CompVersion(key, '==', 0) ],
+            success=[ OpSet(key, bytes(value), lease=lease) ],
+            failure=[ OpGet(key) ]
+        )
+        newly_acquired = False
+        try:
+            result = yield self.client.submit(txn)
+        except Failed as failed:
+            log.debug('key-already-present', key=key)
+            if len(failed.responses) > 0:
+                response = failed.responses[0]
+                if response.kvs is not None and len(response.kvs) > 0:
+                    kv = response.kvs[0]
+                    log.debug('key-already-present', value=kv.value)
+                    if kv.value == value:
+                        reserved = True
+                        log.debug('key-already-reserved', key = kv.key, value=kv.value)
+        else:
+            newly_acquired = True
+            log.debug('key-was-absent', key=key, result=result)
+
+        # Check if reservation succeeded
+        resp = yield self.client.get(key)
+        if resp.kvs is not None and len(resp.kvs) == 1:
+            owner = resp.kvs[0].value
+            if owner == value:
+                if newly_acquired:
+                    log.debug('key-reserved', key=key, value=value, ttl=ttl,
+                             lease_id=lease.lease_id)
+                    reserved = True
+                    # Add key to reservation list
+                    self.key_reservations[key] = lease
+                else:
+                    log.debug("reservation-still-held")
+            else:
+                log.debug('reservation-held-by-another', value=owner)
+
+        if reserved:
+            err = None
+        returnValue((owner, err))
+
+    @inlineCallbacks
+    def _renew_reservation(self, key):
+        result = None
+        err = None
+        if key not in self.key_reservations:
+            err = 'key-not-reserved'
+        else:
+            lease = self.key_reservations[key]
+            # A successfully refreshed lease returns an object of type Header
+            result = yield lease.refresh()
+        if result is None:
+            err = 'lease-refresh-failed'
+        returnValue((result, err))
+
+    @inlineCallbacks
+    def _release_reservation(self, key):
+        err = None
+        if key not in self.key_reservations:
+            err = 'key-not-reserved'
+        else:
+            lease = self.key_reservations[key]
+            time_left = yield lease.remaining()
+            # A successfully revoked lease returns an object of type Header
+            log.debug('release-reservation', key=key, lease_id=lease.lease_id,
+                      time_left_in_secs=time_left)
+            result = yield lease.revoke()
+            if result is None:
+                err = 'lease-revoke-failed'
+            self.key_reservations.pop(key)
+        returnValue((result, err))
+
+    @inlineCallbacks
+    def _release_all_reservations(self):
+        err = None
+        keys_to_delete = []
+        for key in self.key_reservations:
+            lease = self.key_reservations[key]
+            time_left = yield lease.remaining()
+            # A successfully revoked lease returns an object of type Header
+            log.debug('release-reservation', key=key, lease_id=lease.lease_id,
+                      time_left_in_secs=time_left)
+            result = yield lease.revoke()
+            if result is None:
+                err = 'lease-revoke-failed'
+                log.debug('lease-revoke', result=result)
+            keys_to_delete.append(key)
+        for key in keys_to_delete:
+            self.key_reservations.pop(key)
+        returnValue(err)
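The watch path above feeds key changes back through the Event objects defined
in kv_client.py. A small sketch of the callback contract, assuming etcd on
localhost:2379 (host, port, and key name are illustrative):

    # Minimal sketch, not part of this change: assumes etcd at localhost:2379.
    from twisted.internet import reactor
    from python.adapters.common.kvstore.etcd_client import EtcdClient
    from python.adapters.common.kvstore.kv_client import Event

    def on_change(event):
        # event.event_type is Event.PUT, Event.DELETE or Event.CONNECTION_DOWN
        if event.event_type == Event.PUT:
            print('PUT %s -> %s' % (event.key, event.value))
        elif event.event_type == Event.DELETE:
            print('DELETE %s' % event.key)

    client = EtcdClient('localhost', 2379)
    client.watch('voltha/config', on_change)  # on_change fires per key change
    reactor.run()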
diff --git a/python/adapters/common/kvstore/kv_client.py b/python/adapters/common/kvstore/kv_client.py
new file mode 100644
index 0000000..f6486f3
--- /dev/null
+++ b/python/adapters/common/kvstore/kv_client.py
@@ -0,0 +1,206 @@
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from python.common.utils.asleep import asleep
+from structlog import get_logger
+from twisted.internet.defer import inlineCallbacks, returnValue
+
+log = get_logger()
+
+class KVPair():
+    def __init__(self, key, value, index):
+        self.key = key
+        self.value = value
+        self.index = index
+
+class Event():
+    PUT = 0
+    DELETE = 1
+    CONNECTION_DOWN = 2
+
+    def __init__(self, event_type, key, value):
+        self.event_type = event_type
+        self.key = key
+        self.value = value
+
+RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]
+# Worst-case time (in seconds) spent retrying before an operation gives up
+DEFAULT_TIMEOUT = sum(RETRY_BACKOFF)
+
+class KVClient():
+
+    def __init__(self, kv_host, kv_port):
+        self.host = kv_host
+        self.port = kv_port
+        self.key_reservations = {}
+        self.key_watches = {}
+        self.retries = 0
+        self.retry_time = 0
+
+    @inlineCallbacks
+    def get(self, key, timeout=DEFAULT_TIMEOUT):
+        '''
+        This method returns the value of the given key in KV store.
+
+        :param key: The key whose value is requested
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: (KVPair, error) where KVPair is None if an error occurred
+        '''
+        result = yield self._op_with_retry('GET', key, None, timeout)
+        returnValue(result)
+
+    @inlineCallbacks
+    def list(self, key, timeout=DEFAULT_TIMEOUT):
+        '''
+        The list method returns an array of key-value pairs all of which
+        share the same key prefix.
+
+        :param key: The key prefix
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: ([]KVPair, error) where []KVPair is a list of KVPair objects
+        '''
+        result = yield self._op_with_retry('LIST', key, None, timeout)
+        returnValue(result)
+
+    @inlineCallbacks
+    def put(self, key, value, timeout=DEFAULT_TIMEOUT):
+        '''
+        The put method writes a value to the given key in KV store.
+        Do NOT modify a reserved key in an etcd store; doing so seems
+        to nullify the TTL of the key. In other words, the key lasts
+        forever.
+
+        :param key: The key to be written to
+        :param value: The value of the key
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: error, which is set to None for a successful write
+        '''
+        _, err = yield self._op_with_retry('PUT', key, value, timeout)
+        returnValue(err)
+
+    @inlineCallbacks
+    def delete(self, key, timeout=DEFAULT_TIMEOUT):
+        '''
+        The delete method removes a key from the KV store.
+
+        :param key: The key to be deleted
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: error, which is set to None for a successful deletion
+        '''
+        _, err = yield self._op_with_retry('DELETE', key, None, timeout)
+        returnValue(err)
+
+    @inlineCallbacks
+    def reserve(self, key, value, ttl, timeout=DEFAULT_TIMEOUT):
+        '''
+        This method acts essentially like a semaphore. The underlying mechanism
+        differs depending on the KV store: etcd uses a test-and-set transaction;
+        consul acquires a session-based lock. If using etcd, do NOT write to the key
+        subsequent to the initial reservation; the TTL functionality may become
+        impaired (i.e. the reservation never expires).
+
+        :param key: The key under reservation
+        :param value: The reservation owner
+        :param ttl: The time-to-live (TTL) for the reservation. The key is unreserved
+        by the KV store when the TTL expires.
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: (key_value, error) If the key is acquired, then the value returned will
+        be the value passed in.  If the key is already acquired, then the value assigned
+        to that key will be returned.
+        '''
+        result = yield self._op_with_retry('RESERVE', key, value, timeout, ttl=ttl)
+        returnValue(result)
+
+    @inlineCallbacks
+    def renew_reservation(self, key, timeout=DEFAULT_TIMEOUT):
+        '''
+        This method renews the reservation for a given key. A reservation expires
+        after the TTL (Time To Live) period specified when reserving the key.
+
+        :param key: The reserved key
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: error, which is set to None for a successful renewal
+        '''
+        result, err = yield self._op_with_retry('RENEW', key, None, timeout)
+        returnValue(err)
+
+    @inlineCallbacks
+    def release_reservation(self, key, timeout=DEFAULT_TIMEOUT):
+        '''
+        The release_reservation method cancels the reservation for a given key.
+
+        :param key: The reserved key
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: error, which is set to None for a successful cancellation
+        '''
+        result, err = yield self._op_with_retry('RELEASE', key, None, timeout)
+        returnValue(err)
+
+    @inlineCallbacks
+    def release_all_reservations(self, timeout=DEFAULT_TIMEOUT):
+        '''
+        This method cancels all key reservations made previously
+        using the reserve API.
+
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: error, which is set to None for a successful cancellation
+        '''
+        result, err = yield self._op_with_retry('RELEASE-ALL', None, None, timeout)
+        returnValue(err)
+
+    @inlineCallbacks
+    def watch(self, key, key_change_callback, timeout=DEFAULT_TIMEOUT):
+        '''
+        This method provides a watch capability for the given key. If the value of the key
+        changes or the key is deleted, then an event indicating the change is passed to
+        the given callback function.
+
+        :param key: The key to be watched
+        :param key_change_callback: The function invoked whenever the key changes
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: There is no return; key change events are passed to the callback function
+        '''
+        raise NotImplementedError('Method not implemented')
+
+    @inlineCallbacks
+    def close_watch(self, key, timeout=DEFAULT_TIMEOUT):
+        '''
+        This method closes the watch on the given key. Once the watch is closed, key
+        change events are no longer passed to the key change callback function.
+
+        :param key: The key under watch
+        :param timeout: The length of time in seconds the method will wait for a response
+        :return: There is no return
+        '''
+        raise NotImplementedError('Method not implemented')
+
+    @inlineCallbacks
+    def _op_with_retry(self, operation, key, value, timeout, *args, **kw):
+        raise NotImplementedError('Method not implemented')
+
+    def _backoff(self, msg):
+        wait_time = RETRY_BACKOFF[min(self.retries, len(RETRY_BACKOFF) - 1)]
+        self.retry_time += wait_time
+        self.retries += 1
+        log.error(msg, next_retry_in_secs=wait_time,
+                  total_delay_in_secs = self.retry_time,
+                  retries=self.retries)
+        return asleep(wait_time)
+
+    def _clear_backoff(self):
+        if self.retries:
+            log.debug('reset-backoff', after_retries=self.retries)
+            self.retries = 0
+            self.retry_time = 0
\ No newline at end of file
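All operations above resolve to (result, err) tuples rather than raising, and
DEFAULT_TIMEOUT is the sum of the RETRY_BACKOFF schedule
(0.05 + 0.1 + 0.2 + 0.5 + 1 + 2 + 5 = 8.85 s), i.e. the worst-case time an
operation spends retrying. A short sketch of the convention (the key name is
illustrative):

    # Minimal sketch, not part of this change.
    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def read_key(client, key='voltha/config'):
        # get() resolves to (KVPair, err); kvp is None if the key is absent
        kvp, err = yield client.get(key)
        if err is None and kvp is not None:
            print('%s = %s (index %s)' % (kvp.key, kvp.value, kvp.index))
        returnValue((kvp, err))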
diff --git a/python/adapters/common/kvstore/kvstore.py b/python/adapters/common/kvstore/kvstore.py
new file mode 100644
index 0000000..ed7f246
--- /dev/null
+++ b/python/adapters/common/kvstore/kvstore.py
@@ -0,0 +1,31 @@
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from consul_client import ConsulClient
+from etcd_client import EtcdClient
+
+def create_kv_client(kv_store, host, port):
+    '''
+    Factory for creating a client interface to a KV store
+
+    :param kv_store: Specify either 'etcd' or 'consul'
+    :param host: Name or IP address of host serving the KV store
+    :param port: Port number (integer) of the KV service
+    :return: Reference to newly created client interface
+    '''
+    if kv_store == 'etcd':
+        return EtcdClient(host, port)
+    elif kv_store == 'consul':
+        return ConsulClient(host, port)
+    return None
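Putting the pieces together, the factory is the intended entry point. A usage
sketch (store type, host, port, and import path are illustrative assumptions
based on the package layout above):

    # Minimal sketch, not part of this change: 'etcd' could equally be 'consul'.
    from twisted.internet import reactor
    from twisted.internet.defer import inlineCallbacks
    from python.adapters.common.kvstore.kvstore import create_kv_client

    @inlineCallbacks
    def main():
        client = create_kv_client('etcd', 'localhost', 2379)
        err = yield client.put('voltha/test', 'hello')  # err is None on success
        kvp, err = yield client.get('voltha/test')
        print('read back: %s' % (kvp.value if kvp else None))
        reactor.stop()

    reactor.callWhenRunning(main)
    reactor.run()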
diff --git a/python/adapters/common/manhole.py b/python/adapters/common/manhole.py
deleted file mode 100644
index c00c900..0000000
--- a/python/adapters/common/manhole.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import rlcompleter
-from pprint import pprint
-
-import structlog
-from twisted.conch import manhole_ssh
-from twisted.conch.manhole import ColoredManhole
-from twisted.conch.ssh import keys
-from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
-from twisted.cred.portal import Portal
-from twisted.internet import reactor
-
-log = structlog.get_logger()
-
-
-MANHOLE_SERVER_RSA_PRIVATE = './manhole_rsa_key'
-MANHOLE_SERVER_RSA_PUBLIC = './manhole_rsa_key.pub'
-
-
-def get_rsa_keys():
-    if not (os.path.exists(MANHOLE_SERVER_RSA_PUBLIC) and \
-                    os.path.exists(MANHOLE_SERVER_RSA_PRIVATE)):
-        # generate a RSA keypair
-        log.info('generate-rsa-keypair')
-        from Crypto.PublicKey import RSA
-        rsa_key = RSA.generate(1024)
-        public_key_str = rsa_key.publickey().exportKey(format='OpenSSH')
-        private_key_str = rsa_key.exportKey()
-
-        # save keys for next time
-        file(MANHOLE_SERVER_RSA_PUBLIC, 'w+b').write(public_key_str)
-        file(MANHOLE_SERVER_RSA_PRIVATE, 'w+b').write(private_key_str)
-        log.debug('saved-rsa-keypair', public=MANHOLE_SERVER_RSA_PUBLIC,
-                  private=MANHOLE_SERVER_RSA_PRIVATE)
-    else:
-        public_key_str = file(MANHOLE_SERVER_RSA_PUBLIC).read()
-        private_key_str = file(MANHOLE_SERVER_RSA_PRIVATE).read()
-    return public_key_str, private_key_str
-
-
-class ManholeWithCompleter(ColoredManhole):
-
-    def __init__(self, namespace):
-        namespace['manhole'] = self
-        super(ManholeWithCompleter, self).__init__(namespace)
-        self.last_tab = None
-        self.completer = rlcompleter.Completer(self.namespace)
-
-    def handle_TAB(self):
-        if self.last_tab != self.lineBuffer:
-            self.last_tab = self.lineBuffer
-            return
-
-        buffer = ''.join(self.lineBuffer)
-        completions = []
-        maxlen = 3
-        for c in xrange(1000):
-            candidate = self.completer.complete(buffer, c)
-            if not candidate:
-                break
-
-            if len(candidate) > maxlen:
-                maxlen = len(candidate)
-
-            completions.append(candidate)
-
-        if len(completions) == 1:
-            rest = completions[0][len(buffer):]
-            self.terminal.write(rest)
-            self.lineBufferIndex += len(rest)
-            self.lineBuffer.extend(rest)
-
-        elif len(completions):
-            maxlen += 3
-            numcols = self.width / maxlen
-            self.terminal.nextLine()
-            for idx, candidate in enumerate(completions):
-                self.terminal.write('%%-%ss' % maxlen % candidate)
-                if not ((idx + 1) % numcols):
-                    self.terminal.nextLine()
-            self.terminal.nextLine()
-            self.drawInputLine()
-
-
-class Manhole(object):
-
-    def __init__(self, port, pws, **kw):
-        kw.update(globals())
-        kw['pp'] = pprint
-
-        realm = manhole_ssh.TerminalRealm()
-        manhole = ManholeWithCompleter(kw)
-
-        def windowChanged(_, win_size):
-            manhole.terminalSize(*reversed(win_size[:2]))
-
-        realm.sessionFactory.windowChanged = windowChanged
-        realm.chainedProtocolFactory.protocolFactory = lambda _: manhole
-        portal = Portal(realm)
-        portal.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(**pws))
-        factory = manhole_ssh.ConchFactory(portal)
-        public_key_str, private_key_str = get_rsa_keys()
-        factory.publicKeys = {
-            'ssh-rsa': keys.Key.fromString(public_key_str)
-        }
-        factory.privateKeys = {
-            'ssh-rsa': keys.Key.fromString(private_key_str)
-        }
-        reactor.listenTCP(port, factory, interface='localhost')
-
-
-if __name__ == '__main__':
-    Manhole(12222, dict(admin='admin'))
-    reactor.run()
diff --git a/python/adapters/common/openflow/utils.py b/python/adapters/common/openflow/utils.py
deleted file mode 100644
index 730c714..0000000
--- a/python/adapters/common/openflow/utils.py
+++ /dev/null
@@ -1,558 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import structlog
-
-from adapters.protos import openflow_13_pb2 as ofp
-from hashlib import md5
-
-log = structlog.get_logger()
-
-# aliases
-ofb_field = ofp.ofp_oxm_ofb_field
-action = ofp.ofp_action
-
-# OFPAT_* shortcuts
-OUTPUT = ofp.OFPAT_OUTPUT
-COPY_TTL_OUT = ofp.OFPAT_COPY_TTL_OUT
-COPY_TTL_IN = ofp.OFPAT_COPY_TTL_IN
-SET_MPLS_TTL = ofp.OFPAT_SET_MPLS_TTL
-DEC_MPLS_TTL = ofp.OFPAT_DEC_MPLS_TTL
-PUSH_VLAN = ofp.OFPAT_PUSH_VLAN
-POP_VLAN = ofp.OFPAT_POP_VLAN
-PUSH_MPLS = ofp.OFPAT_PUSH_MPLS
-POP_MPLS = ofp.OFPAT_POP_MPLS
-SET_QUEUE = ofp.OFPAT_SET_QUEUE
-GROUP = ofp.OFPAT_GROUP
-SET_NW_TTL = ofp.OFPAT_SET_NW_TTL
-NW_TTL = ofp.OFPAT_DEC_NW_TTL
-SET_FIELD = ofp.OFPAT_SET_FIELD
-PUSH_PBB = ofp.OFPAT_PUSH_PBB
-POP_PBB = ofp.OFPAT_POP_PBB
-EXPERIMENTER = ofp.OFPAT_EXPERIMENTER
-
-# OFPXMT_OFB_* shortcuts (incomplete)
-IN_PORT = ofp.OFPXMT_OFB_IN_PORT
-IN_PHY_PORT = ofp.OFPXMT_OFB_IN_PHY_PORT
-METADATA = ofp.OFPXMT_OFB_METADATA
-ETH_DST = ofp.OFPXMT_OFB_ETH_DST
-ETH_SRC = ofp.OFPXMT_OFB_ETH_SRC
-ETH_TYPE = ofp.OFPXMT_OFB_ETH_TYPE
-VLAN_VID = ofp.OFPXMT_OFB_VLAN_VID
-VLAN_PCP = ofp.OFPXMT_OFB_VLAN_PCP
-IP_DSCP = ofp.OFPXMT_OFB_IP_DSCP
-IP_ECN = ofp.OFPXMT_OFB_IP_ECN
-IP_PROTO = ofp.OFPXMT_OFB_IP_PROTO
-IPV4_SRC = ofp.OFPXMT_OFB_IPV4_SRC
-IPV4_DST = ofp.OFPXMT_OFB_IPV4_DST
-TCP_SRC = ofp.OFPXMT_OFB_TCP_SRC
-TCP_DST = ofp.OFPXMT_OFB_TCP_DST
-UDP_SRC = ofp.OFPXMT_OFB_UDP_SRC
-UDP_DST = ofp.OFPXMT_OFB_UDP_DST
-SCTP_SRC = ofp.OFPXMT_OFB_SCTP_SRC
-SCTP_DST = ofp.OFPXMT_OFB_SCTP_DST
-ICMPV4_TYPE = ofp.OFPXMT_OFB_ICMPV4_TYPE
-ICMPV4_CODE = ofp.OFPXMT_OFB_ICMPV4_CODE
-ARP_OP = ofp.OFPXMT_OFB_ARP_OP
-ARP_SPA = ofp.OFPXMT_OFB_ARP_SPA
-ARP_TPA = ofp.OFPXMT_OFB_ARP_TPA
-ARP_SHA = ofp.OFPXMT_OFB_ARP_SHA
-ARP_THA = ofp.OFPXMT_OFB_ARP_THA
-IPV6_SRC = ofp.OFPXMT_OFB_IPV6_SRC
-IPV6_DST = ofp.OFPXMT_OFB_IPV6_DST
-IPV6_FLABEL = ofp.OFPXMT_OFB_IPV6_FLABEL
-ICMPV6_TYPE = ofp.OFPXMT_OFB_ICMPV6_TYPE
-ICMPV6_CODE = ofp.OFPXMT_OFB_ICMPV6_CODE
-IPV6_ND_TARGET = ofp.OFPXMT_OFB_IPV6_ND_TARGET
-OFB_IPV6_ND_SLL = ofp.OFPXMT_OFB_IPV6_ND_SLL
-IPV6_ND_TLL = ofp.OFPXMT_OFB_IPV6_ND_TLL
-MPLS_LABEL = ofp.OFPXMT_OFB_MPLS_LABEL
-MPLS_TC = ofp.OFPXMT_OFB_MPLS_TC
-MPLS_BOS = ofp.OFPXMT_OFB_MPLS_BOS
-PBB_ISID = ofp.OFPXMT_OFB_PBB_ISID
-TUNNEL_ID = ofp.OFPXMT_OFB_TUNNEL_ID
-IPV6_EXTHDR = ofp.OFPXMT_OFB_IPV6_EXTHDR
-
-
-# ofp_action_* shortcuts
-
-def output(port, max_len=ofp.OFPCML_MAX):
-    return action(
-        type=OUTPUT,
-        output=ofp.ofp_action_output(port=port, max_len=max_len)
-    )
-
-
-def mpls_ttl(ttl):
-    return action(
-        type=SET_MPLS_TTL,
-        mpls_ttl=ofp.ofp_action_mpls_ttl(mpls_ttl=ttl)
-    )
-
-
-def push_vlan(eth_type):
-    return action(
-        type=PUSH_VLAN,
-        push=ofp.ofp_action_push(ethertype=eth_type)
-    )
-
-
-def pop_vlan():
-    return action(
-        type=POP_VLAN
-    )
-
-
-def pop_mpls(eth_type):
-    return action(
-        type=POP_MPLS,
-        pop_mpls=ofp.ofp_action_pop_mpls(ethertype=eth_type)
-    )
-
-
-def group(group_id):
-    return action(
-        type=GROUP,
-        group=ofp.ofp_action_group(group_id=group_id)
-    )
-
-
-def nw_ttl(nw_ttl):
-    return action(
-        type=NW_TTL,
-        nw_ttl=ofp.ofp_action_nw_ttl(nw_ttl=nw_ttl)
-    )
-
-
-def set_field(field):
-    return action(
-        type=SET_FIELD,
-        set_field=ofp.ofp_action_set_field(
-            field=ofp.ofp_oxm_field(
-                oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
-                ofb_field=field))
-    )
-
-
-def experimenter(experimenter, data):
-    return action(
-        type=EXPERIMENTER,
-        experimenter=ofp.ofp_action_experimenter(
-            experimenter=experimenter, data=data)
-    )
-
-
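For illustration, the action helpers above compose directly into the ofp_action lists that flow specifications expect. A minimal sketch (the VLAN id and port number are assumptions; vlan_vid is one of the field generators defined just below):

    # Tag upstream traffic with VLAN 100 and send it out port 2.
    # 0x1000 is OFPVID_PRESENT, which OpenFlow 1.3 requires in VLAN_VID.
    actions = [
        push_vlan(0x8100),
        set_field(vlan_vid(0x1000 | 100)),
        output(2),
    ]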
-# ofb_field generators (incomplete set)
-
-def in_port(_in_port):
-    return ofb_field(type=IN_PORT, port=_in_port)
-
-
-def in_phy_port(_in_phy_port):
-    return ofb_field(type=IN_PHY_PORT, port=_in_phy_port)
-
-
-def metadata(_table_metadata):
-    return ofb_field(type=METADATA, table_metadata=_table_metadata)
-
-
-def eth_dst(_eth_dst):
-    return ofb_field(type=ETH_DST, eth_dst=_eth_dst)
-
-
-def eth_src(_eth_src):
-    return ofb_field(type=ETH_SRC, eth_src=_eth_src)
-
-
-def eth_type(_eth_type):
-    return ofb_field(type=ETH_TYPE, eth_type=_eth_type)
-
-
-def vlan_vid(_vlan_vid):
-    return ofb_field(type=VLAN_VID, vlan_vid=_vlan_vid)
-
-
-def vlan_pcp(_vlan_pcp):
-    return ofb_field(type=VLAN_PCP, vlan_pcp=_vlan_pcp)
-
-
-def ip_dscp(_ip_dscp):
-    return ofb_field(type=IP_DSCP, ip_dscp=_ip_dscp)
-
-
-def ip_ecn(_ip_ecn):
-    return ofb_field(type=IP_ECN, ip_ecn=_ip_ecn)
-
-
-def ip_proto(_ip_proto):
-    return ofb_field(type=IP_PROTO, ip_proto=_ip_proto)
-
-
-def ipv4_src(_ipv4_src):
-    return ofb_field(type=IPV4_SRC, ipv4_src=_ipv4_src)
-
-
-def ipv4_dst(_ipv4_dst):
-    return ofb_field(type=IPV4_DST, ipv4_dst=_ipv4_dst)
-
-
-def tcp_src(_tcp_src):
-    return ofb_field(type=TCP_SRC, tcp_src=_tcp_src)
-
-
-def tcp_dst(_tcp_dst):
-    return ofb_field(type=TCP_DST, tcp_dst=_tcp_dst)
-
-
-def udp_src(_udp_src):
-    return ofb_field(type=UDP_SRC, udp_src=_udp_src)
-
-
-def udp_dst(_udp_dst):
-    return ofb_field(type=UDP_DST, udp_dst=_udp_dst)
-
-
-def sctp_src(_sctp_src):
-    return ofb_field(type=SCTP_SRC, sctp_src=_sctp_src)
-
-
-def sctp_dst(_sctp_dst):
-    return ofb_field(type=SCTP_DST, sctp_dst=_sctp_dst)
-
-
-def icmpv4_type(_icmpv4_type):
-    return ofb_field(type=ICMPV4_TYPE, icmpv4_type=_icmpv4_type)
-
-
-def icmpv4_code(_icmpv4_code):
-    return ofb_field(type=ICMPV4_CODE, icmpv4_code=_icmpv4_code)
-
-
-def arp_op(_arp_op):
-    return ofb_field(type=ARP_OP, arp_op=_arp_op)
-
-
-def arp_spa(_arp_spa):
-    return ofb_field(type=ARP_SPA, arp_spa=_arp_spa)
-
-
-def arp_tpa(_arp_tpa):
-    return ofb_field(type=ARP_TPA, arp_tpa=_arp_tpa)
-
-
-def arp_sha(_arp_sha):
-    return ofb_field(type=ARP_SHA, arp_sha=_arp_sha)
-
-
-def arp_tha(_arp_tha):
-    return ofb_field(type=ARP_THA, arp_tha=_arp_tha)
-
-
-def ipv6_src(_ipv6_src):
-    return ofb_field(type=IPV6_SRC, ipv6_src=_ipv6_src)
-
-
-def ipv6_dst(_ipv6_dst):
-    return ofb_field(type=IPV6_DST, ipv6_dst=_ipv6_dst)
-
-
-def ipv6_flabel(_ipv6_flabel):
-    return ofb_field(type=IPV6_FLABEL, ipv6_flabel=_ipv6_flabel)
-
-
-def icmpv6_type(_icmpv6_type):
-    return ofb_field(type=ICMPV6_TYPE, icmpv6_type=_icmpv6_type)
-
-
-def icmpv6_code(_icmpv6_code):
-    return ofb_field(type=ICMPV6_CODE, icmpv6_code=_icmpv6_code)
-
-
-def ipv6_nd_target(_ipv6_nd_target):
-    return ofb_field(type=IPV6_ND_TARGET, ipv6_nd_target=_ipv6_nd_target)
-
-
-def ofb_ipv6_nd_sll(_ofb_ipv6_nd_sll):
-    return ofb_field(type=OFB_IPV6_ND_SLL, ipv6_nd_ssl=_ofb_ipv6_nd_sll)
-
-
-def ipv6_nd_tll(_ipv6_nd_tll):
-    return ofb_field(type=IPV6_ND_TLL, ipv6_nd_tll=_ipv6_nd_tll)
-
-
-def mpls_label(_mpls_label):
-    return ofb_field(type=MPLS_LABEL, mpls_label=_mpls_label)
-
-
-def mpls_tc(_mpls_tc):
-    return ofb_field(type=MPLS_TC, mpls_tc=_mpls_tc)
-
-
-def mpls_bos(_mpls_bos):
-    return ofb_field(type=MPLS_BOS, mpls_bos=_mpls_bos)
-
-
-def pbb_isid(_pbb_isid):
-    return ofb_field(type=PBB_ISID, pbb_isid=_pbb_isid)
-
-
-def tunnel_id(_tunnel_id):
-    return ofb_field(type=TUNNEL_ID, tunnel_id=_tunnel_id)
-
-
-def ipv6_exthdr(_ipv6_exthdr):
-    return ofb_field(type=IPV6_EXTHDR, ipv6_exthdr=_ipv6_exthdr)
-
-
-# frequently used extractors:
-
-def get_actions(flow):
-    """Extract list of ofp_action objects from flow spec object"""
-    assert isinstance(flow, ofp.ofp_flow_stats)
-    # we have the following hard assumptions for now
-    for instruction in flow.instructions:
-        if instruction.type == ofp.OFPIT_APPLY_ACTIONS:
-            return instruction.actions.actions
-
-
-def get_ofb_fields(flow):
-    assert isinstance(flow, ofp.ofp_flow_stats)
-    assert flow.match.type == ofp.OFPMT_OXM
-    ofb_fields = []
-    for field in flow.match.oxm_fields:
-        assert field.oxm_class == ofp.OFPXMC_OPENFLOW_BASIC
-        ofb_fields.append(field.ofb_field)
-    return ofb_fields
-
-
-def get_out_port(flow):
-    for action in get_actions(flow):
-        if action.type == OUTPUT:
-            return action.output.port
-    return None
-
-
-def get_in_port(flow):
-    for field in get_ofb_fields(flow):
-        if field.type == IN_PORT:
-            return field.port
-    return None
-
-
-def get_goto_table_id(flow):
-    for instruction in flow.instructions:
-        if instruction.type == ofp.OFPIT_GOTO_TABLE:
-            return instruction.goto_table.table_id
-    return None
-
-
-def get_metadata(flow):
-    '''Legacy get method (only want the lower 32 bits).'''
-    for field in get_ofb_fields(flow):
-        if field.type == METADATA:
-            return field.table_metadata & 0xffffffff
-    return None
-
-
-def get_metadata_64_bit(flow):
-    for field in get_ofb_fields(flow):
-        if field.type == METADATA:
-            return field.table_metadata
-    return None
-
-
-def get_port_number_from_metadata(flow):
-    """
-    The port number (UNI on ONU) is in the lower 32-bits of metadata and
-    the inner_tag is in the upper 32-bits
-
-    This is set in the ONOS OltPipeline as a metadata field
-    """
-    md = get_metadata_64_bit(flow)
-
-    if md is None:
-        return None
-
-    if md <= 0xffffffff:
-        log.warn('onos-upgrade-suggested',
-                 metadata=md,
-                 message='Legacy MetaData detected from OltPipeline')
-        return md
-
-    return md & 0xffffffff
-
-
-def get_inner_tag_from_metadata(flow):
-    """
-    The port number (UNI on ONU) is in the lower 32-bits of metadata and
-    the inner_tag is in the upper 32-bits
-
-    This is set in the ONOS OltPipeline as a metadata field
-    """
-    md = get_metadata_64_bit(flow)
-
-    if md is None:
-        return None
-
-    if md <= 0xffffffff:
-        log.warn('onos-upgrade-suggested',
-                 metadata=md,
-                 message='Legacy MetaData detected from OltPipeline')
-        return md
-
-    return (md >> 32) & 0xffffffff
-
-
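Since the two extractors above differ only in which half of the 64-bit word they return, a tiny worked example helps; the tag and port values here are assumptions:

    # ONOS OltPipeline writes: metadata = (inner_tag << 32) | uni_port
    md = (500 << 32) | 16
    assert (md >> 32) & 0xffffffff == 500   # inner tag
    assert md & 0xffffffff == 16            # UNI port number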
-# test and extract next table and group information
-def has_next_table(flow):
-    return get_goto_table_id(flow) is not None
-
-
-def get_group(flow):
-    for action in get_actions(flow):
-        if action.type == GROUP:
-            return action.group.group_id
-    return None
-
-
-def has_group(flow):
-    return get_group(flow) is not None
-
-
-def mk_oxm_fields(match_fields):
-    oxm_fields = [
-        ofp.ofp_oxm_field(
-            oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
-            ofb_field=field
-        ) for field in match_fields
-    ]
-
-    return oxm_fields
-
-
-def mk_instructions_from_actions(actions):
-    instructions_action = ofp.ofp_instruction_actions()
-    instructions_action.actions.extend(actions)
-    instruction = ofp.ofp_instruction(type=ofp.OFPIT_APPLY_ACTIONS,
-                                      actions=instructions_action)
-    return [instruction]
-
-
-def mk_simple_flow_mod(match_fields, actions, command=ofp.OFPFC_ADD,
-                       next_table_id=None, **kw):
-    """
-    Convenience function to generate an ofp_flow_mod message with an OXM BASIC
-    match composed from the match_fields, and a single APPLY_ACTIONS
-    instruction with a list of ofp_action objects.
-    :param match_fields: list(ofp_oxm_ofb_field)
-    :param actions: list(ofp_action)
-    :param command: one of OFPFC_*
-    :param next_table_id: optional table id for an added GOTO_TABLE instruction
-    :param kw: additional keyword-based params to ofp_flow_mod
-    :return: initialized ofp_flow_mod object
-    """
-    instructions = [
-        ofp.ofp_instruction(
-            type=ofp.OFPIT_APPLY_ACTIONS,
-            actions=ofp.ofp_instruction_actions(actions=actions)
-        )
-    ]
-    if next_table_id is not None:
-        instructions.append(ofp.ofp_instruction(
-            type=ofp.OFPIT_GOTO_TABLE,
-            goto_table=ofp.ofp_instruction_goto_table(table_id=next_table_id)
-        ))
-
-    return ofp.ofp_flow_mod(
-        command=command,
-        match=ofp.ofp_match(
-            type=ofp.OFPMT_OXM,
-            oxm_fields=[
-                ofp.ofp_oxm_field(
-                    oxm_class=ofp.OFPXMC_OPENFLOW_BASIC,
-                    ofb_field=field
-                ) for field in match_fields
-            ]
-        ),
-        instructions=instructions,
-        **kw
-    )
-
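A hedged usage sketch of the convenience function above (the table port, EAPOL ethertype match and priority are assumptions):

    flow_mod = mk_simple_flow_mod(
        match_fields=[in_port(1), eth_type(0x888e)],  # EAPOL from UNI port 1
        actions=[output(ofp.OFPP_CONTROLLER)],        # punt to controller
        priority=1000,
    )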
-
-def mk_multicast_group_mod(group_id, buckets, command=ofp.OFPGC_ADD):
-    group = ofp.ofp_group_mod(
-        command=command,
-        type=ofp.OFPGT_ALL,
-        group_id=group_id,
-        buckets=buckets
-    )
-    return group
-
-
-def hash_flow_stats(flow):
-    """
-    Return unique 64-bit integer hash for flow covering the following
-    attributes: 'table_id', 'priority', 'flags', 'cookie', 'match', '_instruction_string'
-    """
-    _instruction_string = ""
-    for _instruction in flow.instructions:
-        _instruction_string += _instruction.SerializeToString()
-
-    hex = md5('{},{},{},{},{},{}'.format(
-        flow.table_id,
-        flow.priority,
-        flow.flags,
-        flow.cookie,
-        flow.match.SerializeToString(),
-        _instruction_string
-    )).hexdigest()
-    return int(hex[:16], 16)
-
-
-def flow_stats_entry_from_flow_mod_message(mod):
-    flow = ofp.ofp_flow_stats(
-        table_id=mod.table_id,
-        priority=mod.priority,
-        idle_timeout=mod.idle_timeout,
-        hard_timeout=mod.hard_timeout,
-        flags=mod.flags,
-        cookie=mod.cookie,
-        match=mod.match,
-        instructions=mod.instructions
-    )
-    flow.id = hash_flow_stats(flow)
-    return flow
-
-
-def group_entry_from_group_mod(mod):
-    group = ofp.ofp_group_entry(
-        desc=ofp.ofp_group_desc(
-            type=mod.type,
-            group_id=mod.group_id,
-            buckets=mod.buckets
-        ),
-        stats=ofp.ofp_group_stats(
-            group_id=mod.group_id
-            # TODO do we need to instantiate bucket bins?
-        )
-    )
-    return group
-
-
-def mk_flow_stat(**kw):
-    return flow_stats_entry_from_flow_mod_message(mk_simple_flow_mod(**kw))
-
-
-def mk_group_stat(**kw):
-    return group_entry_from_group_mod(mk_multicast_group_mod(**kw))
diff --git a/python/adapters/common/openflow/__init__.py b/python/adapters/common/pon_resource_manager/__init__.py
similarity index 99%
rename from python/adapters/common/openflow/__init__.py
rename to python/adapters/common/pon_resource_manager/__init__.py
index b0fb0b2..2d104e0 100644
--- a/python/adapters/common/openflow/__init__.py
+++ b/python/adapters/common/pon_resource_manager/__init__.py
@@ -1,13 +1,13 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/adapters/common/pon_resource_manager/resource_kv_store.py b/python/adapters/common/pon_resource_manager/resource_kv_store.py
new file mode 100644
index 0000000..a1a5c14
--- /dev/null
+++ b/python/adapters/common/pon_resource_manager/resource_kv_store.py
@@ -0,0 +1,107 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Resource KV store - interface between Resource Manager and backend store."""
+import structlog
+
+from voltha.core.config.config_backend import ConsulStore
+from voltha.core.config.config_backend import EtcdStore
+
+# KV store uses this prefix to store resource info
+PATH_PREFIX = 'resource_manager/{}'
+
+
+class ResourceKvStore(object):
+    """Implements APIs to store/get/remove a resource in the backend store."""
+
+    def __init__(self, technology, device_id, backend, host, port):
+        """
+        Create ResourceKvStore object.
+
+        Based on the backend ('consul' or 'etcd'), use the host and port
+        to create the respective store client.
+
+        :param technology: PON technology
+        :param device_id: OLT device id
+        :param backend: Type of backend storage (etcd or consul)
+        :param host: host ip info for backend storage
+        :param port: port for the backend storage
+        :raises Exception: when an invalid backend store is passed as an argument
+        """
+        # logger
+        self._log = structlog.get_logger()
+
+        path = PATH_PREFIX.format(technology)
+        try:
+            if backend == 'consul':
+                self._kv_store = ConsulStore(host, port, path)
+            elif backend == 'etcd':
+                self._kv_store = EtcdStore(host, port, path)
+            else:
+                self._log.error('Invalid-backend')
+                raise Exception("Invalid-backend-for-kv-store")
+        except Exception as e:
+            self._log.exception("exception-in-init")
+            raise Exception(e)
+
+    def update_to_kv_store(self, path, resource):
+        """
+        Update resource.
+
+        :param path: path to update the resource
+        :param resource: updated resource
+        """
+        try:
+            self._kv_store[path] = str(resource)
+            self._log.debug("Resource-updated-in-kv-store", path=path)
+            return True
+        except BaseException:
+            self._log.exception("Resource-update-in-kv-store-failed",
+                                path=path, resource=resource)
+        return False
+
+    def get_from_kv_store(self, path):
+        """
+        Get resource.
+
+        :param path: path to get the resource
+        """
+        resource = None
+        try:
+            resource = self._kv_store[path]
+            self._log.debug("Got-resource-from-kv-store", path=path)
+        except KeyError:
+            self._log.info("Resource-not-found-updating-resource",
+                           path=path)
+        except BaseException:
+            self._log.exception("Getting-resource-from-kv-store-failed",
+                                path=path)
+        return resource
+
+    def remove_from_kv_store(self, path):
+        """
+        Remove resource.
+
+        :param path: path to remove the resource
+        """
+        try:
+            del self._kv_store[path]
+            self._log.debug("Resource-deleted-in-kv-store", path=path)
+            return True
+        except BaseException:
+            self._log.exception("Resource-delete-in-kv-store-failed",
+                                path=path)
+        return False

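A hypothetical usage sketch of the class above; the technology name, device id and etcd endpoint are assumptions:

    store = ResourceKvStore('xgspon', 'olt-1', 'etcd', '127.0.0.1', 2379)
    ok = store.update_to_kv_store('olt-1/onu_id_pool/0', '{"start_idx": 1}')
    value = store.get_from_kv_store('olt-1/onu_id_pool/0')
    store.remove_from_kv_store('olt-1/onu_id_pool/0')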
diff --git a/python/adapters/common/pon_resource_manager/resource_manager.py b/python/adapters/common/pon_resource_manager/resource_manager.py
new file mode 100644
index 0000000..17b2871
--- /dev/null
+++ b/python/adapters/common/pon_resource_manager/resource_manager.py
@@ -0,0 +1,677 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Resource Manager will be unique for each OLT device.
+
+It exposes APIs to create/free alloc_ids/onu_ids/gemport_ids. Resource Manager
+uses a KV store in backend to ensure resiliency of the data.
+"""
+import json
+import structlog
+from bitstring import BitArray
+from ast import literal_eval
+import shlex
+from argparse import ArgumentParser, ArgumentError
+
+from common.pon_resource_manager.resource_kv_store import ResourceKvStore
+
+
+# Used to parse extra arguments to OpenOlt adapter from the NBI
+class OltVendorArgumentParser(ArgumentParser):
+    # Must override the exit command to prevent it from
+    # calling sys.exit().  Raise an exception instead.
+    def exit(self, status=0, message=None):
+        raise Exception(message)
+
+
+class PONResourceManager(object):
+    """Implements APIs to initialize/allocate/release alloc/gemport/onu IDs."""
+
+    # Constants to identify resource pool
+    ONU_ID = 'ONU_ID'
+    ALLOC_ID = 'ALLOC_ID'
+    GEMPORT_ID = 'GEMPORT_ID'
+
+    # The resource ranges for a given device vendor_type should be placed
+    # at 'resource_manager/<technology>/resource_ranges/<olt_vendor_type>'
+    # path on the KV store.
+    # If Resource Range parameters are to be read from the external KV store,
+    # they are expected to be stored in the following format.
+    # Note: All parameters are MANDATORY for now.
+    '''
+    {
+        "onu_id_start": 1,
+        "onu_id_end": 127,
+        "alloc_id_start": 1024,
+        "alloc_id_end": 2816,
+        "gemport_id_start": 1024,
+        "gemport_id_end": 8960,
+        "pon_ports": 16
+    }
+
+    '''
+    # Constants used as keys to reference the resource range parameters
+    # from an external KV store.
+    ONU_START_IDX = "onu_id_start"
+    ONU_END_IDX = "onu_id_end"
+    ALLOC_ID_START_IDX = "alloc_id_start"
+    ALLOC_ID_END_IDX = "alloc_id_end"
+    GEM_PORT_ID_START_IDX = "gemport_id_start"
+    GEM_PORT_ID_END_IDX = "gemport_id_end"
+    NUM_OF_PON_PORT = "pon_ports"
+
+    # PON Resource range configuration on the KV store.
+    # Format: 'resource_manager/<technology>/resource_ranges/<olt_vendor_type>'
+    # The KV store backend is initialized with a path prefix and we need to
+    # provide only the suffix.
+    PON_RESOURCE_RANGE_CONFIG_PATH = 'resource_ranges/{}'
+
+    # resource path suffix
+    ALLOC_ID_POOL_PATH = '{}/alloc_id_pool/{}'
+    GEMPORT_ID_POOL_PATH = '{}/gemport_id_pool/{}'
+    ONU_ID_POOL_PATH = '{}/onu_id_pool/{}'
+
+    # Path on the KV store for storing list of alloc IDs for a given ONU
+    # Format: <device_id>/<(pon_intf_id, onu_id)>/alloc_ids
+    ALLOC_ID_RESOURCE_MAP_PATH = '{}/{}/alloc_ids'
+
+    # Path on the KV store for storing list of gemport IDs for a given ONU
+    # Format: <device_id>/<(pon_intf_id, onu_id)>/gemport_ids
+    GEMPORT_ID_RESOURCE_MAP_PATH = '{}/{}/gemport_ids'
+
+    # Constants for internal usage.
+    PON_INTF_ID = 'pon_intf_id'
+    START_IDX = 'start_idx'
+    END_IDX = 'end_idx'
+    POOL = 'pool'
+
+    def __init__(self, technology, extra_args, device_id,
+                 backend, host, port):
+        """
+        Create PONResourceManager object.
+
+        :param technology: PON technology
+        :param extra_args: This string contains extra arguments passed during
+        pre-provisioning of OLT and specifies the OLT Vendor type
+        :param device_id: OLT device id
+        :param backend: backend store
+        :param host: ip of backend store
+        :param port: port on which backend store listens
+        :raises Exception: when an invalid backend store is passed as an argument
+        """
+        # logger
+        self._log = structlog.get_logger()
+
+        try:
+            self.technology = technology
+            self.extra_args = extra_args
+            self.device_id = device_id
+            self.backend = backend
+            self.host = host
+            self.port = port
+            self.olt_vendor = None
+            self._kv_store = ResourceKvStore(technology, device_id, backend,
+                                             host, port)
+            # Below attribute, pon_resource_ranges, should be initialized
+            # by reading from KV store.
+            self.pon_resource_ranges = dict()
+        except Exception as e:
+            self._log.exception("exception-in-init")
+            raise Exception(e)
+
+    def init_resource_ranges_from_kv_store(self):
+        """
+        Initialize PON resource ranges with config fetched from kv store.
+
+        :return boolean: True if PON resource ranges initialized, else False
+        """
+        self.olt_vendor = self._get_olt_vendor()
+        # Try to initialize the PON Resource Ranges from KV store based on the
+        # OLT vendor key, if available
+        if self.olt_vendor is None:
+            self._log.info("olt-vendor-unavailable--not-reading-from-kv-store")
+            return False
+
+        path = self.PON_RESOURCE_RANGE_CONFIG_PATH.format(self.olt_vendor)
+        try:
+            # get resource from kv store
+            result = self._kv_store.get_from_kv_store(path)
+
+            if result is None:
+                self._log.debug("resource-range-config-unavailable-on-kvstore")
+                return False
+
+            resource_range_config = result
+
+            if resource_range_config is not None:
+                self.pon_resource_ranges = json.loads(resource_range_config)
+                self._log.debug("Init-resource-ranges-from-kvstore-success",
+                                pon_resource_ranges=self.pon_resource_ranges,
+                                path=path)
+                return True
+
+        except Exception as e:
+            self._log.exception("error-initializing-resource-range-from-kv-store",
+                                e=e)
+        return False
+
+    def init_default_pon_resource_ranges(self, onu_start_idx=1,
+                                         onu_end_idx=127,
+                                         alloc_id_start_idx=1024,
+                                         alloc_id_end_idx=2816,
+                                         gem_port_id_start_idx=1024,
+                                         gem_port_id_end_idx=8960,
+                                         num_of_pon_ports=16):
+        """
+        Initialize default PON resource ranges
+
+        :param onu_start_idx: onu id start index
+        :param onu_end_idx: onu id end index
+        :param alloc_id_start_idx: alloc id start index
+        :param alloc_id_end_idx: alloc id end index
+        :param gem_port_id_start_idx: gemport id start index
+        :param gem_port_id_end_idx: gemport id end index
+        :param num_of_pon_ports: number of PON ports
+        """
+        self._log.info("initialize-default-resource-range-values")
+        self.pon_resource_ranges[
+            PONResourceManager.ONU_START_IDX] = onu_start_idx
+        self.pon_resource_ranges[PONResourceManager.ONU_END_IDX] = onu_end_idx
+        self.pon_resource_ranges[
+            PONResourceManager.ALLOC_ID_START_IDX] = alloc_id_start_idx
+        self.pon_resource_ranges[
+            PONResourceManager.ALLOC_ID_END_IDX] = alloc_id_end_idx
+        self.pon_resource_ranges[
+            PONResourceManager.GEM_PORT_ID_START_IDX] = gem_port_id_start_idx
+        self.pon_resource_ranges[
+            PONResourceManager.GEM_PORT_ID_END_IDX] = gem_port_id_end_idx
+        self.pon_resource_ranges[
+            PONResourceManager.NUM_OF_PON_PORT] = num_of_pon_ports
+
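Taken together, the two initializers above suggest a bootstrap sequence along these lines (a sketch; the device id, vendor argument and consul endpoint are assumptions):

    pon_mgr = PONResourceManager(
        technology='gpon', extra_args='--olt_vendor asfvolt16',
        device_id='olt-1', backend='consul', host='127.0.0.1', port=8500)
    # Prefer ranges published on the KV store; fall back to defaults.
    if not pon_mgr.init_resource_ranges_from_kv_store():
        pon_mgr.init_default_pon_resource_ranges()
    pon_mgr.init_device_resource_pool()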
+    def init_device_resource_pool(self):
+        """
+        Initialize resource pool for all PON ports.
+        """
+        i = 0
+        while i < self.pon_resource_ranges[PONResourceManager.NUM_OF_PON_PORT]:
+            self.init_resource_id_pool(
+                pon_intf_id=i,
+                resource_type=PONResourceManager.ONU_ID,
+                start_idx=self.pon_resource_ranges[
+                    PONResourceManager.ONU_START_IDX],
+                end_idx=self.pon_resource_ranges[
+                    PONResourceManager.ONU_END_IDX])
+
+            i += 1
+
+        # TODO: ASFvOLT16 platform requires alloc and gemport ID to be unique
+        # across OLT. To keep it simple, a single pool (POOL 0) is maintained
+        # for both the resource types. This may need to change later.
+        self.init_resource_id_pool(
+            pon_intf_id=0,
+            resource_type=PONResourceManager.ALLOC_ID,
+            start_idx=self.pon_resource_ranges[
+                PONResourceManager.ALLOC_ID_START_IDX],
+            end_idx=self.pon_resource_ranges[
+                PONResourceManager.ALLOC_ID_END_IDX])
+
+        self.init_resource_id_pool(
+            pon_intf_id=0,
+            resource_type=PONResourceManager.GEMPORT_ID,
+            start_idx=self.pon_resource_ranges[
+                PONResourceManager.GEM_PORT_ID_START_IDX],
+            end_idx=self.pon_resource_ranges[
+                PONResourceManager.GEM_PORT_ID_END_IDX])
+
+    def clear_device_resource_pool(self):
+        """
+        Clear resource pool of all PON ports.
+        """
+        i = 0
+        while i < self.pon_resource_ranges[PONResourceManager.NUM_OF_PON_PORT]:
+            self.clear_resource_id_pool(
+                pon_intf_id=i,
+                resource_type=PONResourceManager.ONU_ID,
+            )
+            i += 1
+
+        self.clear_resource_id_pool(
+            pon_intf_id=0,
+            resource_type=PONResourceManager.ALLOC_ID,
+        )
+
+        self.clear_resource_id_pool(
+            pon_intf_id=0,
+            resource_type=PONResourceManager.GEMPORT_ID,
+        )
+
+    def init_resource_id_pool(self, pon_intf_id, resource_type, start_idx,
+                              end_idx):
+        """
+        Initialize Resource ID pool for a given Resource Type on a given PON Port
+
+        :param pon_intf_id: OLT PON interface id
+        :param resource_type: String to identify type of resource
+        :param start_idx: start index for onu id pool
+        :param end_idx: end index for onu id pool
+        :return boolean: True if resource id pool initialized else false
+        """
+        status = False
+        path = self._get_path(pon_intf_id, resource_type)
+        if path is None:
+            return status
+
+        try:
+            # In case of adapter reboot and reconciliation, the resource may
+            # already be present in the kv store; it is created only if absent
+            resource = self._get_resource(path)
+
+            if resource is not None:
+                self._log.info("Resource-already-present-in-store", path=path)
+                status = True
+            else:
+                resource = self._format_resource(pon_intf_id, start_idx,
+                                                 end_idx)
+                self._log.info("Resource-initialized", path=path)
+
+                # Add resource as json in kv store.
+                result = self._kv_store.update_to_kv_store(path, resource)
+                if result is True:
+                    status = True
+
+        except Exception as e:
+            self._log.exception("error-initializing-resource-pool", e=e)
+
+        return status
+
+    def get_resource_id(self, pon_intf_id, resource_type, num_of_id=1):
+        """
+        Create alloc/gemport/onu id for given OLT PON interface.
+
+        :param pon_intf_id: OLT PON interface id
+        :param resource_type: String to identify type of resource
+        :param num_of_id: required number of ids
+        :return list/int/None: list, int or None if resource type is
+                               alloc_id/gemport_id, onu_id or invalid type
+                               respectively
+        """
+        result = None
+
+        # TODO: ASFvOLT16 platform requires alloc and gemport ID to be unique
+        # across OLT. To keep it simple, a single pool (POOL 0) is maintained
+        # for both the resource types. This may need to change later.
+        # Override the incoming pon_intf_id to PON0
+        if resource_type == PONResourceManager.GEMPORT_ID or \
+                resource_type == PONResourceManager.ALLOC_ID:
+            pon_intf_id = 0
+
+        path = self._get_path(pon_intf_id, resource_type)
+        if path is None:
+            return result
+
+        try:
+            resource = self._get_resource(path)
+            if resource is not None and resource_type == \
+                    PONResourceManager.ONU_ID:
+                result = self._generate_next_id(resource)
+            elif resource is not None and (
+                    resource_type == PONResourceManager.GEMPORT_ID or
+                    resource_type == PONResourceManager.ALLOC_ID):
+                result = list()
+                while num_of_id > 0:
+                    result.append(self._generate_next_id(resource))
+                    num_of_id -= 1
+            else:
+                raise Exception("get-resource-failed")
+
+            self._log.debug("Get-" + resource_type + "-success", result=result,
+                            path=path)
+            # Update resource in kv store
+            self._update_resource(path, resource)
+
+        except Exception as e:
+            self._log.exception("Get-" + resource_type + "-id-failed",
+                                path=path, e=e)
+        return result
+
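Continuing the sketch above, allocating ids for a newly discovered ONU might look like this (the interface id and count are assumptions):

    onu_id = pon_mgr.get_resource_id(
        pon_intf_id=0, resource_type=PONResourceManager.ONU_ID)
    alloc_ids = pon_mgr.get_resource_id(
        pon_intf_id=0, resource_type=PONResourceManager.ALLOC_ID, num_of_id=1)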
+    def free_resource_id(self, pon_intf_id, resource_type, release_content):
+        """
+        Release alloc/gemport/onu id for given OLT PON interface.
+
+        :param pon_intf_id: OLT PON interface id
+        :param resource_type: String to identify type of resource
+        :param release_content: id or list of ids to be released
+        :return boolean: True if all IDs in given release_content released
+                         else False
+        """
+        status = False
+
+        # TODO: ASFvOLT16 platform requires alloc and gemport ID to be unique
+        # across OLT. To keep it simple, a single pool (POOL 0) is maintained
+        # for both the resource types. This may need to change later.
+        # Override the incoming pon_intf_id to PON0
+        if resource_type == PONResourceManager.GEMPORT_ID or \
+                resource_type == PONResourceManager.ALLOC_ID:
+            pon_intf_id = 0
+
+        path = self._get_path(pon_intf_id, resource_type)
+        if path is None:
+            return status
+
+        try:
+            resource = self._get_resource(path)
+            if resource is not None and resource_type == \
+                    PONResourceManager.ONU_ID:
+                self._release_id(resource, release_content)
+            elif resource is not None and (
+                    resource_type == PONResourceManager.ALLOC_ID or
+                    resource_type == PONResourceManager.GEMPORT_ID):
+                for content in release_content:
+                    self._release_id(resource, content)
+            else:
+                raise Exception("get-resource-failed")
+
+            self._log.debug("Free-" + resource_type + "-success", path=path)
+
+            # Update resource in kv store
+            status = self._update_resource(path, resource)
+
+        except Exception as e:
+            self._log.exception("Free-" + resource_type + "-failed",
+                                path=path, e=e)
+        return status
+
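And the matching release path for the ids obtained above (note the single id for ONU_ID and the list for ALLOC_ID/GEMPORT_ID, mirroring the code):

    pon_mgr.free_resource_id(0, PONResourceManager.ONU_ID, onu_id)
    pon_mgr.free_resource_id(0, PONResourceManager.ALLOC_ID, alloc_ids)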
+    def clear_resource_id_pool(self, pon_intf_id, resource_type):
+        """
+        Clear Resource Pool for a given Resource Type on a given PON Port.
+
+        :return boolean: True if removed else False
+        """
+        path = self._get_path(pon_intf_id, resource_type)
+        if path is None:
+            return False
+
+        try:
+            result = self._kv_store.remove_from_kv_store(path)
+            if result is True:
+                self._log.debug("Resource-pool-cleared",
+                                device_id=self.device_id,
+                                path=path)
+                return True
+        except Exception as e:
+            self._log.exception("error-clearing-resource-pool", e=e)
+
+        self._log.error("Clear-resource-pool-failed", device_id=self.device_id,
+                        path=path)
+        return False
+
+    def init_resource_map(self, pon_intf_onu_id):
+        """
+        Initialize resource map
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        """
+        # initialize pon_intf_onu_id tuple to alloc_ids map
+        alloc_id_path = PONResourceManager.ALLOC_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id)
+        )
+        alloc_ids = list()
+        self._kv_store.update_to_kv_store(
+            alloc_id_path, json.dumps(alloc_ids)
+        )
+
+        # initialize pon_intf_onu_id tuple to gemport_ids map
+        gemport_id_path = PONResourceManager.GEMPORT_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id)
+        )
+        gemport_ids = list()
+        self._kv_store.update_to_kv_store(
+            gemport_id_path, json.dumps(gemport_ids)
+        )
+
+    def remove_resource_map(self, pon_intf_onu_id):
+        """
+        Remove resource map
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        """
+        # remove pon_intf_onu_id tuple to alloc_ids map
+        alloc_id_path = PONResourceManager.ALLOC_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id)
+        )
+        self._kv_store.remove_from_kv_store(alloc_id_path)
+
+        # remove pon_intf_onu_id tuple to gemport_ids map
+        gemport_id_path = PONResourceManager.GEMPORT_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id)
+        )
+        self._kv_store.remove_from_kv_store(gemport_id_path)
+
+    def get_current_alloc_ids_for_onu(self, pon_intf_onu_id):
+        """
+        Get currently configured alloc ids for given pon_intf_onu_id
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        """
+        path = PONResourceManager.ALLOC_ID_RESOURCE_MAP_PATH.format(
+            self.device_id,
+            str(pon_intf_onu_id))
+        value = self._kv_store.get_from_kv_store(path)
+        if value is not None:
+            alloc_id_list = json.loads(value)
+            if len(alloc_id_list) > 0:
+                return alloc_id_list
+
+        return None
+
+    def get_current_gemport_ids_for_onu(self, pon_intf_onu_id):
+        """
+        Get currently configured gemport ids for given pon_intf_onu_id
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        """
+
+        path = PONResourceManager.GEMPORT_ID_RESOURCE_MAP_PATH.format(
+            self.device_id,
+            str(pon_intf_onu_id))
+        value = self._kv_store.get_from_kv_store(path)
+        if value is not None:
+            gemport_id_list = json.loads(value)
+            if len(gemport_id_list) > 0:
+                return gemport_id_list
+
+        return None
+
+    def update_alloc_ids_for_onu(self, pon_intf_onu_id, alloc_ids):
+        """
+        Update currently configured alloc ids for given pon_intf_onu_id
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        """
+        path = PONResourceManager.ALLOC_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id)
+        )
+        self._kv_store.update_to_kv_store(
+            path, json.dumps(alloc_ids)
+        )
+
+    def update_gemport_ids_for_onu(self, pon_intf_onu_id, gemport_ids):
+        """
+        Update currently configured gemport ids for given pon_intf_onu_id
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        """
+        path = PONResourceManager.GEMPORT_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id)
+        )
+        self._kv_store.update_to_kv_store(
+            path, json.dumps(gemport_ids)
+        )
+
+    def _get_olt_vendor(self):
+        """
+        Get olt vendor variant
+
+        :return: type of olt vendor
+        """
+        olt_vendor = None
+        if self.extra_args and len(self.extra_args) > 0:
+            parser = OltVendorArgumentParser(add_help=False)
+            parser.add_argument('--olt_vendor', '-o', action='store',
+                                choices=['default', 'asfvolt16', 'cigolt24'],
+                                default='default')
+            try:
+                args = parser.parse_args(shlex.split(self.extra_args))
+                self._log.debug('parsing-extra-arguments', args=args)
+                olt_vendor = args.olt_vendor
+            except ArgumentError as e:
+                self._log.exception('invalid-arguments', e=e)
+            except Exception as e:
+                self._log.exception('option-parsing-error', e=e)
+
+        return olt_vendor
+
+    def _generate_next_id(self, resource):
+        """
+        Generate a unique id, using the pool's start index as offset.
+
+        :param resource: resource used to generate ID
+        :return int: generated id
+        """
+        pos = resource[PONResourceManager.POOL].find('0b0')
+        resource[PONResourceManager.POOL].set(1, pos)
+        return pos[0] + resource[PONResourceManager.START_IDX]
+
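The id generator above leans on bitstring semantics that are easy to miss: find() returns a tuple of match positions and set() flips bits in place. An illustrative round trip (assuming the bitstring package):

    from bitstring import BitArray
    pool = BitArray('0b1100')   # bits 0 and 1 already allocated
    pos = pool.find('0b0')      # (2,) - position of the first free bit
    pool.set(1, pos)            # pool becomes 0b1110
    next_id = pos[0] + 1        # with start_idx == 1, the allocated id is 3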
+    def _release_id(self, resource, unique_id):
+        """
+        Release a unique id, using the pool's start index as offset.
+
+        :param resource: resource used to release ID
+        :param unique_id: id to be released
+        """
+        pos = ((int(unique_id)) - resource[PONResourceManager.START_IDX])
+        resource[PONResourceManager.POOL].set(0, pos)
+
+    def _get_path(self, pon_intf_id, resource_type):
+        """
+        Get path for given resource type.
+
+        :param pon_intf_id: OLT PON interface id
+        :param resource_type: String to identify type of resource
+        :return: path for given resource type
+        """
+        path = None
+        if resource_type == PONResourceManager.ONU_ID:
+            path = self._get_onu_id_resource_path(pon_intf_id)
+        elif resource_type == PONResourceManager.ALLOC_ID:
+            path = self._get_alloc_id_resource_path(pon_intf_id)
+        elif resource_type == PONResourceManager.GEMPORT_ID:
+            path = self._get_gemport_id_resource_path(pon_intf_id)
+        else:
+            self._log.error("invalid-resource-pool-identifier")
+        return path
+
+    def _get_alloc_id_resource_path(self, pon_intf_id):
+        """
+        Get alloc id resource path.
+
+        :param pon_intf_id: OLT PON interface id
+        :return: alloc id resource path
+        """
+        return PONResourceManager.ALLOC_ID_POOL_PATH.format(
+            self.device_id, pon_intf_id)
+
+    def _get_gemport_id_resource_path(self, pon_intf_id):
+        """
+        Get gemport id resource path.
+
+        :param pon_intf_id: OLT PON interface id
+        :return: gemport id resource path
+        """
+        return PONResourceManager.GEMPORT_ID_POOL_PATH.format(
+            self.device_id, pon_intf_id)
+
+    def _get_onu_id_resource_path(self, pon_intf_id):
+        """
+        Get onu id resource path.
+
+        :param pon_intf_id: OLT PON interface id
+        :return: onu id resource path
+        """
+        return PONResourceManager.ONU_ID_POOL_PATH.format(
+            self.device_id, pon_intf_id)
+
+    def _update_resource(self, path, resource):
+        """
+        Update resource in resource kv store.
+
+        :param path: path to update resource
+        :param resource: resource to be updated
+        :return boolean: True if resource updated in kv store else False
+        """
+        resource[PONResourceManager.POOL] = \
+            resource[PONResourceManager.POOL].bin
+        result = self._kv_store.update_to_kv_store(path, json.dumps(resource))
+        if result is True:
+            return True
+        return False
+
+    def _get_resource(self, path):
+        """
+        Get resource from kv store.
+
+        :param path: path to get resource
+        :return: resource if resource present in kv store else None
+        """
+        # get resource from kv store
+        result = self._kv_store.get_from_kv_store(path)
+        if result is None:
+            return result
+        self._log.info("dumping-resource", result=result)
+        resource = result
+
+        if resource is not None:
+            # decode resource fetched from backend store to dictionary
+            resource = json.loads(resource)
+
+            # The resource pool is stored in the backend as a binary string;
+            # to generate/release IDs it needs to be converted back to a
+            # BitArray
+            resource[PONResourceManager.POOL] = \
+                BitArray('0b' + resource[PONResourceManager.POOL])
+
+        return resource
+
+    def _format_resource(self, pon_intf_id, start_idx, end_idx):
+        """
+        Format resource as json.
+
+        :param pon_intf_id: OLT PON interface id
+        :param start_idx: start index for id pool
+        :param end_idx: end index for id pool
+        :return dictionary: resource formatted as dictionary
+        """
+        # Format resource as json to be stored in backend store
+        resource = dict()
+        resource[PONResourceManager.PON_INTF_ID] = pon_intf_id
+        resource[PONResourceManager.START_IDX] = start_idx
+        resource[PONResourceManager.END_IDX] = end_idx
+
+        # resource pool stored in backend store as binary string
+        resource[PONResourceManager.POOL] = BitArray(end_idx).bin
+
+        return json.dumps(resource)
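One subtlety in _format_resource: BitArray(end_idx) builds a zero-filled bit string of end_idx bits, so the pool is indexed from bit 0 and _generate_next_id adds start_idx back on top. A quick check (assuming bitstring semantics for an integer argument):

    from bitstring import BitArray
    print(BitArray(8).bin)   # '00000000' - eight free slots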
diff --git a/python/adapters/common/structlog_setup.py b/python/adapters/common/structlog_setup.py
deleted file mode 100644
index 3401977..0000000
--- a/python/adapters/common/structlog_setup.py
+++ /dev/null
@@ -1,134 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""Setting up proper logging for Voltha"""
-
-import logging
-import logging.config
-from collections import OrderedDict
-
-import structlog
-from structlog.stdlib import BoundLogger, INFO
-
-try:
-    from thread import get_ident as _get_ident
-except ImportError:
-    from dummy_thread import get_ident as _get_ident
-
-
-class StructuredLogRenderer(object):
-    def __call__(self, logger, name, event_dict):
-        # in order to keep structured log data in event_dict to be forwarded as
-        # is, we need to pass it into the logger framework as the first
-        # positional argument.
-        args = (event_dict,)
-        kwargs = {}
-        return args, kwargs
-
-
-class PlainRenderedOrderedDict(OrderedDict):
-    """Our special version of OrderedDict that renders into string as a dict,
-       to make the log stream output cleaner.
-    """
-    def __repr__(self, _repr_running={}):
-        'od.__repr__() <==> repr(od)'
-        call_key = id(self), _get_ident()
-        if call_key in _repr_running:
-            return '...'
-        _repr_running[call_key] = 1
-        try:
-            if not self:
-                return '{}'
-            return '{%s}' % ", ".join("%s: %s" % (k, v)
-                                      for k, v in self.items())
-        finally:
-            del _repr_running[call_key]
-
-
-def setup_logging(log_config, instance_id, verbosity_adjust=0):
-    """
-    Set up logging such that:
-    - The primary logging entry method is structlog
-      (see http://structlog.readthedocs.io/en/stable/index.html)
-    - By default, the logging backend is Python standard lib logger
-    """
-
-    def add_exc_info_flag_for_exception(_, name, event_dict):
-        if name == 'exception':
-            event_dict['exc_info'] = True
-        return event_dict
-
-    def add_instance_id(_, __, event_dict):
-        event_dict['instance_id'] = instance_id
-        return event_dict
-
-    # Configure standard logging
-    logging.config.dictConfig(log_config)
-    logging.root.level -= 10 * verbosity_adjust
-
-    processors = [
-        add_exc_info_flag_for_exception,
-        structlog.processors.StackInfoRenderer(),
-        structlog.processors.format_exc_info,
-        add_instance_id,
-        StructuredLogRenderer(),
-    ]
-    structlog.configure(logger_factory=structlog.stdlib.LoggerFactory(),
-                        context_class=PlainRenderedOrderedDict,
-                        wrapper_class=BoundLogger,
-                        processors=processors)
-
-    # Mark first line of log
-    log = structlog.get_logger()
-    log.info("first-line")
-    return log
-
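For reference, the deleted helper expected a standard logging dictConfig mapping; a minimal sketch of a config it would accept (handler and level values are assumptions):

    LOG_CONFIG = {
        'version': 1,
        'handlers': {'console': {'class': 'logging.StreamHandler'}},
        'root': {'handlers': ['console'], 'level': 'INFO'},
    }
    log = setup_logging(LOG_CONFIG, instance_id='1')
    log.info('component-started', component='ponsim_olt')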
-
-def update_logging(instance_id, vcore_id):
-    """
-    Add the vcore id to the structured logger
-    :param instance_id: the assigned instance id
-    :param vcore_id: the assigned vcore id
-    :return: structured logger
-    """
-    def add_exc_info_flag_for_exception(_, name, event_dict):
-        if name == 'exception':
-            event_dict['exc_info'] = True
-        return event_dict
-
-    def add_instance_id(_, __, event_dict):
-        if instance_id is not None:
-            event_dict['instance_id'] = instance_id
-        return event_dict
-
-    def add_vcore_id(_, __, event_dict):
-        if vcore_id is not None:
-            event_dict['vcore_id'] = vcore_id
-        return event_dict
-
-    processors = [
-        add_exc_info_flag_for_exception,
-        structlog.processors.StackInfoRenderer(),
-        structlog.processors.format_exc_info,
-        add_instance_id,
-        add_vcore_id,
-        StructuredLogRenderer(),
-    ]
-    structlog.configure(processors=processors)
-
-    # Mark first line of log
-    log = structlog.get_logger()
-    log.info("updated-logger")
-    return log
diff --git a/python/adapters/common/utils/__init__.py b/python/adapters/common/utils/__init__.py
deleted file mode 100644
index b0fb0b2..0000000
--- a/python/adapters/common/utils/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/python/adapters/common/utils/asleep.py b/python/adapters/common/utils/asleep.py
deleted file mode 100644
index 10d1ce3..0000000
--- a/python/adapters/common/utils/asleep.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-""" Async sleep (asleep) method and other twisted goodies """
-
-from twisted.internet import reactor
-from twisted.internet.defer import Deferred
-
-
-def asleep(dt):
-    """
-    Async (event driven) wait for given time period (in seconds)
-    :param dt: Delay in seconds
-    :return: Deferred to be fired with value None when time expires.
-    """
-    d = Deferred()
-    reactor.callLater(dt, lambda: d.callback(None))
-    return d
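The deleted asleep() helper was the non-blocking counterpart of time.sleep(); a typical call site looks like this sketch:

    from twisted.internet.defer import inlineCallbacks

    @inlineCallbacks
    def heartbeat():
        while True:
            # ... emit heartbeat ...
            yield asleep(5)  # yields control to the reactor for 5 seconds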
diff --git a/python/adapters/common/utils/consulhelpers.py b/python/adapters/common/utils/consulhelpers.py
deleted file mode 100644
index 6060ba3..0000000
--- a/python/adapters/common/utils/consulhelpers.py
+++ /dev/null
@@ -1,178 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Some consul related convenience functions
-"""
-
-from structlog import get_logger
-from consul import Consul
-from random import randint
-from adapters.common.utils.nethelpers import get_my_primary_local_ipv4
-
-log = get_logger()
-
-
-def connect_to_consult(consul_endpoint):
-    log.debug('getting-service-endpoint', consul=consul_endpoint)
-
-    host = consul_endpoint.split(':')[0].strip()
-    port = int(consul_endpoint.split(':')[1].strip())
-
-    return Consul(host=host, port=port)
-
-
-def verify_all_services_healthy(consul_endpoint, service_name=None,
-                                number_of_expected_services=None):
-    """
-    Verify with consul that registered services are healthy. If service_name
-    is given, only that service is checked.
-    :param consul_endpoint: a <host>:<port> string
-    :param service_name: name of service to check, optional
-    :param number_of_expected_services: number of services to check for, optional
-    :return: True if healthy, False otherwise
-    """
-
-    def check_health(service):
-        _, serv_health = consul.health.service(service, passing=True)
-        return not serv_health == []
-
-    consul = connect_to_consult(consul_endpoint)
-
-    if service_name is not None:
-        return check_health(service_name)
-
-    services = get_all_services(consul_endpoint)
-
-    items = services.keys()
-
-    if number_of_expected_services is not None and \
-                    len(items) != number_of_expected_services:
-        return False
-
-    for item in items:
-        if not check_health(item):
-            return False
-
-    return True
-
-
-def get_all_services(consul_endpoint):
-    log.debug('getting-service-verify-health')
-
-    consul = connect_to_consult(consul_endpoint)
-    _, services = consul.catalog.services()
-
-    return services
-
-
-def get_all_instances_of_service(consul_endpoint, service_name):
-    log.debug('getting-all-instances-of-service', service=service_name)
-
-    consul = connect_to_consult(consul_endpoint)
-    _, services = consul.catalog.service(service_name)
-
-    for service in services:
-        log.debug('service',
-                  name=service['ServiceName'],
-                  serviceid=service['ServiceID'],
-                  serviceport=service['ServicePort'],
-                  createindex=service['CreateIndex'])
-
-    return services
-
-
-def get_endpoint_from_consul(consul_endpoint, service_name):
-    """
-    Get endpoint of service_name from consul.
-    :param consul_endpoint: a <host>:<port> string
-    :param service_name: name of service for which endpoint
-                         needs to be found.
-    :return: service endpoint if available, else exit.
-    """
-    log.debug('getting-service-info', service=service_name)
-
-    consul = connect_to_consult(consul_endpoint)
-    _, services = consul.catalog.service(service_name)
-
-    if len(services) == 0:
-        raise Exception(
-            'Cannot find service {} in consul'.format(service_name))
-
-    """ Get host IPV4 address
-    """
-    local_ipv4 = get_my_primary_local_ipv4()
-    """ If host IP address from where the request came in matches
-        the IP address of the requested service's host IP address,
-        pick the endpoint
-    """
-    for i in range(len(services)):
-        service = services[i]
-        if service['ServiceAddress'] == local_ipv4:
-            log.debug("picking address locally")
-            endpoint = '{}:{}'.format(service['ServiceAddress'],
-                                      service['ServicePort'])
-            return endpoint
-
-    """ If service is not available locally, picak a random
-        endpoint for the service from the list
-    """
-    service = services[randint(0, len(services) - 1)]
-    endpoint = '{}:{}'.format(service['ServiceAddress'],
-                              service['ServicePort'])
-
-    return endpoint
-
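A hedged sketch of resolving a service endpoint with the helper above (the consul address and service name are assumptions):

    endpoint = get_endpoint_from_consul('localhost:8500', 'voltha-grpc')
    host, port = endpoint.split(':')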
-
-def get_healthy_instances(consul_endpoint, service_name=None,
-                          number_of_expected_services=None):
-    """
-    Verify with consul that registered services are healthy. If service_name
-    is given, only that service is checked.
-    :param consul_endpoint: a <host>:<port> string
-    :param service_name: name of service to check, optional
-    :param number_of_expected_services: number of services to check for, optional
-    :return: True if healthy, False otherwise
-    """
-
-    def check_health(service):
-        _, serv_health = consul.health.service(service, passing=True)
-        return not serv_health == []
-
-    consul = connect_to_consult(consul_endpoint)
-
-    if service_name is not None:
-        return check_health(service_name)
-
-    services = get_all_services(consul_endpoint)
-
-    items = services.keys()
-
-    if number_of_expected_services is not None and \
-                    len(items) != number_of_expected_services:
-        return False
-
-    for item in items:
-        if not check_health(item):
-            return False
-
-    return True
-
-
-if __name__ == '__main__':
-    # print get_endpoint_from_consul('10.100.198.220:8500', 'kafka')
-    # print get_healthy_instances('10.100.198.220:8500', 'voltha-health')
-    # print get_healthy_instances('10.100.198.220:8500')
-    get_all_instances_of_service('10.100.198.220:8500', 'voltha-grpc')
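For reference, a minimal sketch of how these helpers were driven (assuming a
Consul agent at 10.100.198.220:8500, as in the __main__ block above, and the
python-consul client this module wraps):

    from adapters.common.utils.consulhelpers import (
        get_endpoint_from_consul, get_healthy_instances)

    # Resolve a host:port endpoint for a service registered in Consul;
    # a local endpoint is preferred, otherwise a random one is picked.
    print get_endpoint_from_consul('10.100.198.220:8500', 'kafka')

    # True only if every registered service has a passing health check.
    print get_healthy_instances('10.100.198.220:8500')
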
diff --git a/python/adapters/common/utils/deferred_utils.py b/python/adapters/common/utils/deferred_utils.py
deleted file mode 100644
index 3c55c1a..0000000
--- a/python/adapters/common/utils/deferred_utils.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from twisted.internet import reactor
-from twisted.internet.defer import Deferred
-from twisted.internet.error import AlreadyCalled
-
-
-class TimeOutError(Exception): pass
-
-
-class DeferredWithTimeout(Deferred):
-    """
-    Deferred with a timeout. If neither the callback nor the errback method
-    is called within the given time, the deferred's errback will be called
-    with a TimeOutError() exception.
-
-    All other uses are the same as of Deferred().
-    """
-    def __init__(self, timeout=1.0):
-        Deferred.__init__(self)
-        self._timeout = timeout
-        self.timer = reactor.callLater(timeout, self.timed_out)
-
-    def timed_out(self):
-        self.errback(
-            TimeOutError('timed out after {} seconds'.format(self._timeout)))
-
-    def callback(self, result):
-        self._cancel_timer()
-        return Deferred.callback(self, result)
-
-    def errback(self, fail):
-        self._cancel_timer()
-        return Deferred.errback(self, fail)
-
-    def cancel(self):
-        self._cancel_timer()
-        return Deferred.cancel(self)
-
-    def _cancel_timer(self):
-        try:
-            self.timer.cancel()
-        except AlreadyCalled:
-            pass
-
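A short sketch of how the removed DeferredWithTimeout was typically used (the
two-second timeout and the echoed payload are illustrative; a running Twisted
reactor is assumed):

    from twisted.internet import reactor
    from adapters.common.utils.deferred_utils import (
        DeferredWithTimeout, TimeOutError)

    def on_result(result):
        print 'got', result
        reactor.stop()

    def on_timeout(failure):
        failure.trap(TimeOutError)
        print 'timed out'
        reactor.stop()

    d = DeferredWithTimeout(timeout=2.0)
    d.addCallbacks(on_result, on_timeout)
    reactor.callLater(1.0, d.callback, 'payload')  # remove to see the timeout
    reactor.run()
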
diff --git a/python/adapters/common/utils/dockerhelpers.py b/python/adapters/common/utils/dockerhelpers.py
deleted file mode 100644
index 4620aef..0000000
--- a/python/adapters/common/utils/dockerhelpers.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Some docker related convenience functions
-"""
-from datetime import datetime
-from concurrent.futures import ThreadPoolExecutor
-
-import os
-import socket
-from structlog import get_logger
-
-from docker import Client, errors
-
-
-docker_socket = os.environ.get('DOCKER_SOCK', 'unix://tmp/docker.sock')
-log = get_logger()
-
-def get_my_containers_name():
-    """
-    Return the name of the docker container in which this process is running.
-    To look up the container name, we use the container ID extracted from the
-    $HOSTNAME environment variable (which is set by docker conventions).
-    :return: String with the docker container name (or None if any issue is
-             encountered)
-    """
-    my_container_id = os.environ.get('HOSTNAME', None)
-
-    try:
-        docker_cli = Client(base_url=docker_socket)
-        info = docker_cli.inspect_container(my_container_id)
-
-    except Exception, e:
-        log.exception('failed', my_container_id=my_container_id, e=e)
-        raise
-
-    name = info['Name'].lstrip('/')
-
-    return name
-
-def get_all_running_containers():
-    try:
-        docker_cli = Client(base_url=docker_socket)
-        containers = docker_cli.containers()
-
-    except Exception, e:
-        log.exception('failed', e=e)
-        raise
-
-    return containers
-
-def inspect_container(id):
-    try:
-        docker_cli = Client(base_url=docker_socket)
-        info = docker_cli.inspect_container(id)
-    except Exception, e:
-        log.exception('failed-inspect-container', id=id, e=e)
-        raise
-
-    return info
-
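Usage of the removed docker helpers was straightforward; a sketch, assuming
the process runs in a container with the docker socket mounted at the
DOCKER_SOCK default above:

    from adapters.common.utils.dockerhelpers import (
        get_my_containers_name, get_all_running_containers)

    print get_my_containers_name()  # name of the container we run in
    for c in get_all_running_containers():
        # each entry is a dict straight from the docker API
        print c['Id'][:12], c.get('Names')
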
diff --git a/python/adapters/common/utils/grpc_utils.py b/python/adapters/common/utils/grpc_utils.py
deleted file mode 100644
index 8df630e..0000000
--- a/python/adapters/common/utils/grpc_utils.py
+++ /dev/null
@@ -1,109 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Utilities to handle gRPC server and client side code in a Twisted environment
-"""
-import structlog
-from concurrent.futures import Future
-from twisted.internet import reactor
-from twisted.internet.defer import Deferred
-from twisted.python.threadable import isInIOThread
-
-
-log = structlog.get_logger()
-
-
-def twisted_async(func):
-    """
-    This decorator can be used to implement a gRPC method on the twisted
-    thread, allowing asynchronous programming in Twisted while serving
-    a gRPC call.
-
-    gRPC methods normally are called on the futures.ThreadPool threads,
-    so these methods cannot directly use Twisted protocol constructs.
-    If the implementation of the methods needs to touch Twisted, it is
-    safer (or mandatory) to wrap the method with this decorator, which will
-    run the inner method on the Twisted reactor thread and ensure that the
-    result is passed back to the calling (gRPC worker) thread.
-
-    Example usage:
-
-    When implementing a gRPC server, typical pattern is:
-
-    class SpamService(SpamServicer):
-
-        def GetBadSpam(self, request, context):
-            '''this is called from a ThreadPoolExecutor thread'''
-            # generally unsafe to make Twisted calls
-
-        @twisted_async
-        def GetSpamSafely(self, request, context):
-            '''this method now is executed on the Twisted main thread'''
-            # safe to call any Twisted protocol functions
-
-        @twisted_async
-        @inlineCallbacks
-        def GetAsyncSpam(self, request, context):
-            '''this generator can use inlineCallbacks Twisted style'''
-            result = yield some_async_twisted_call(request)
-            returnValue(result)
-
-    """
-    def in_thread_wrapper(*args, **kw):
-
-        if isInIOThread():
-
-            return func(*args, **kw)
-
-        f = Future()
-
-        def twisted_wrapper():
-            try:
-                d = func(*args, **kw)
-                if isinstance(d, Deferred):
-
-                    def _done(result):
-                        f.set_result(result)
-                        f.done()
-
-                    def _error(e):
-                        f.set_exception(e)
-                        f.done()
-
-                    d.addCallback(_done)
-                    d.addErrback(_error)
-
-                else:
-                    f.set_result(d)
-                    f.done()
-
-            except Exception, e:
-                f.set_exception(e)
-                f.done()
-
-        reactor.callFromThread(twisted_wrapper)
-        try:
-            result = f.result()
-        except Exception, e:
-            log.exception(e=e, func=func, args=args, kw=kw)
-            raise
-
-        return result
-
-    return in_thread_wrapper
-
-
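Beyond the servicer pattern shown in the docstring, the decorator also works
for plain functions; a minimal sketch (assuming a running reactor and a call
made from a non-reactor thread, e.g. a gRPC worker):

    from twisted.internet import reactor
    from twisted.internet.defer import Deferred
    from adapters.common.utils.grpc_utils import twisted_async

    @twisted_async
    def delayed_echo(msg):
        # Body runs on the reactor thread and may return a Deferred.
        d = Deferred()
        reactor.callLater(0.5, d.callback, msg)
        return d

    # Called from a worker thread, delayed_echo('hello') blocks that thread
    # until the Deferred fires on the reactor thread, then returns 'hello'.
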
diff --git a/python/adapters/common/utils/id_generation.py b/python/adapters/common/utils/id_generation.py
deleted file mode 100644
index e0fea1c..0000000
--- a/python/adapters/common/utils/id_generation.py
+++ /dev/null
@@ -1,116 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# """ ID generation utils """
-
-from uuid import uuid4
-
-
-BROADCAST_CORE_ID=hex(0xFFFF)[2:]
-
-def get_next_core_id(current_id_in_hex_str):
-    """
-    :param current_id_in_hex_str: a hex string of the maximum core id 
-    assigned without the leading 0x characters
-    :return: current_id_in_hex_str + 1 in hex string 
-    """
-    if not current_id_in_hex_str or current_id_in_hex_str == '':
-        return '0001'
-    else:
-        return format(int(current_id_in_hex_str, 16) + 1, '04x')
-
-
-def create_cluster_logical_device_ids(core_id, switch_id):
-    """
-    Creates a logical device id and an OpenFlow datapath id that are unique
-    across the Voltha cluster.
-    The returned logical device id represents a 64-bit integer where the
-    lower 48 bits are the switch id and the upper 16 bits are the core id.
-    For the datapath id the core id is set to '0000' as it is not used for
-    Voltha core routing.
-    :param core_id: string
-    :param switch_id:int
-    :return: cluster logical device id and OpenFlow datapath id
-    """
-    switch_id = format(switch_id, '012x')
-    core_in_hex=format(int(core_id, 16), '04x')
-    ld_id = '{}{}'.format(core_in_hex[-4:], switch_id[-12:])
-    dpid_id = '{}{}'.format('0000', switch_id[-12:])
-    return ld_id, int(dpid_id, 16)
-
-def is_broadcast_core_id(id):
-    assert id and len(id) == 16
-    return id[:4] == BROADCAST_CORE_ID
-
-def create_empty_broadcast_id():
-    """
-    Returns an empty broadcast id (ffff000000000000). The id is used to
-    dispatch xPON objects across all the Voltha instances.
-    :return: An empty broadcast id
-    """
-    return '{}{}'.format(BROADCAST_CORE_ID, '0'*12)
-
-def create_cluster_id():
-    """
-    Returns an id that is common across all Voltha instances. The id
-    is a hex string of 64 bits. The lower 48 bits refer to an id specific
-    to that object while the upper 16 bits refer to the broadcast core_id.
-    :return: A common id across all Voltha instances
-    """
-    return '{}{}'.format(BROADCAST_CORE_ID, uuid4().hex[:12])
-
-def create_cluster_device_id(core_id):
-    """
-    Creates a device id that is unique across the Voltha cluster.
-    The device id is a hex string of 64 bits. The lower 48 bits refer to
-    the device id while the upper 16 bits refer to the core id.
-    :param core_id: string
-    :return: cluster device id
-    """
-    return '{}{}'.format(format(int(core_id), '04x'), uuid4().hex[:12])
-
-
-def get_core_id_from_device_id(device_id):
-    # Device id is a string and the first 4 characters represent the core_id
-    assert device_id and len(device_id) == 16
-    # Return the leading 4 hex characters (the core id)
-    return device_id[:4]
-
-
-def get_core_id_from_logical_device_id(logical_device_id):
-    """ 
-    Logical Device id is a string and the first 4 characters represent the 
-    core_id
-    :param logical_device_id: 
-    :return: core_id string
-    """
-    assert logical_device_id and len(logical_device_id) == 16
-    # Return the leading 4 hex characters (the core id)
-    return logical_device_id[:4]
-
-
-def get_core_id_from_datapath_id(datapath_id):
-    """
-    datapath id is a uint64 where:
-        - low 48 bits -> switch_id
-        - high 16 bits -> core id
-    :param datapath_id: 
-    :return: core_id string
-    """
-    assert datapath_id
-    # Get the hex string and remove the '0x' prefix
-    id_in_hex_str = hex(datapath_id)[2:]
-    assert len(id_in_hex_str) > 12
-    return id_in_hex_str[:-12]
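The id helpers compose as follows; a quick sketch of the round-trips (core id
'0001' is illustrative):

    from adapters.common.utils.id_generation import (
        create_cluster_device_id, get_core_id_from_device_id,
        get_next_core_id)

    device_id = create_cluster_device_id('0001')  # '0001' + 12 random hexs
    assert get_core_id_from_device_id(device_id) == '0001'
    assert get_next_core_id('0001') == '0002'
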
diff --git a/python/adapters/common/utils/indexpool.py b/python/adapters/common/utils/indexpool.py
deleted file mode 100644
index 858cb3a..0000000
--- a/python/adapters/common/utils/indexpool.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-from bitstring import BitArray
-import structlog
-
-log = structlog.get_logger()
-
-class IndexPool(object):
-    def __init__(self, max_entries, offset):
-        self.max_entries = max_entries
-        self.offset = offset
-        self.indices = BitArray(self.max_entries)
-
-    def get_next(self):
-        try:
-            _pos = self.indices.find('0b0')
-            self.indices.set(1, _pos)
-            return self.offset + _pos[0]
-        except IndexError:
-            log.info("exception-fail-to-allocate-id-all-bits-in-use")
-            return None
-
-    def allocate(self, index):
-        try:
-            _pos = index - self.offset
-            if not (0 <= _pos < self.max_entries):
-                log.info("{}-out-of-range".format(index))
-                return None
-            if self.indices[_pos]:
-                log.info("{}-is-already-allocated".format(index))
-                return None
-            self.indices.set(1, _pos)
-            return index
-
-        except IndexError:
-            return None
-
-    def release(self, index):
-        index -= self.offset
-        _pos = (index,)
-        try:
-            self.indices.set(0, _pos)
-        except IndexError:
-            log.info("bit-position-{}-out-of-range".format(index))
-
-    # Pre-allocate one or more indices, marking them in use; the argument
-    # must be a tuple
-    def pre_allocate(self, index):
-        if(isinstance(index, tuple)):
-            _lst = list(index)
-            for i in range(len(_lst)):
-                _lst[i] -= self.offset
-            index = tuple(_lst)
-            self.indices.set(1, index)
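A short sketch of the removed IndexPool in action (the size of 8 and offset
of 100 are arbitrary):

    from adapters.common.utils.indexpool import IndexPool

    pool = IndexPool(max_entries=8, offset=100)
    print pool.get_next()          # 100: lowest free index, plus offset
    pool.pre_allocate((101, 102))  # reserve known-used indices up front
    print pool.allocate(101)       # None: already allocated
    pool.release(101)
    print pool.allocate(101)       # 101: free again, so allocation succeeds
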
diff --git a/python/adapters/common/utils/json_format.py b/python/adapters/common/utils/json_format.py
deleted file mode 100644
index c18d013..0000000
--- a/python/adapters/common/utils/json_format.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""
-Monkey patched json_format to allow best-effort decoding of Any fields.
-Use the additional flag (strict_any_handling=False) to trigger the
-best-effort behavior. Omit the flag, or just use the original json_format
-module for the strict behavior.
-"""
-
-from google.protobuf import json_format
-
-class _PatchedPrinter(json_format._Printer):
-
-    def __init__(self, including_default_value_fields=False,
-                 preserving_proto_field_name=False,
-                 strict_any_handling=False):
-        super(_PatchedPrinter, self).__init__(including_default_value_fields,
-                                              preserving_proto_field_name)
-        self.strict_any_handling = strict_any_handling
-
-    def _BestEffortAnyMessageToJsonObject(self, msg):
-        try:
-            res = self._AnyMessageToJsonObject(msg)
-        except TypeError:
-            res = self._RegularMessageToJsonObject(msg, {})
-        return res
-
-
-def MessageToDict(message,
-                  including_default_value_fields=False,
-                  preserving_proto_field_name=False,
-                  strict_any_handling=False):
-    """Converts protobuf message to a JSON dictionary.
-
-    Args:
-      message: The protocol buffers message instance to serialize.
-      including_default_value_fields: If True, singular primitive fields,
-          repeated fields, and map fields will always be serialized.  If
-          False, only serialize non-empty fields.  Singular message fields
-          and oneof fields are not affected by this option.
-      preserving_proto_field_name: If True, use the original proto field
-          names as defined in the .proto file. If False, convert the field
-          names to lowerCamelCase.
-      strict_any_handling: If True, conversion will error out (like in the
-          original method) if it encounters an Any field whose value type
-          is not loaded. If False, the conversion will leave the field
-          un-packed, but otherwise will continue.
-
-    Returns:
-      A dict representation of the JSON formatted protocol buffer message.
-    """
-    printer = _PatchedPrinter(including_default_value_fields,
-                              preserving_proto_field_name,
-                              strict_any_handling=strict_any_handling)
-    # pylint: disable=protected-access
-    return printer._MessageToJsonObject(message)
-
-
-def MessageToJson(message,
-                  including_default_value_fields=False,
-                  preserving_proto_field_name=False,
-                  strict_any_handling=False):
-  """Converts protobuf message to JSON format.
-
-  Args:
-    message: The protocol buffers message instance to serialize.
-    including_default_value_fields: If True, singular primitive fields,
-        repeated fields, and map fields will always be serialized.  If
-        False, only serialize non-empty fields.  Singular message fields
-        and oneof fields are not affected by this option.
-    preserving_proto_field_name: If True, use the original proto field
-        names as defined in the .proto file. If False, convert the field
-        names to lowerCamelCase.
-    strict_any_handling: If True, conversion will error out (like in the
-        original method) if it encounters an Any field whose value type
-        is not loaded. If False, the conversion will leave the field
-        un-packed, but otherwise will continue.
-
-  Returns:
-    A string containing the JSON formatted protocol buffer message.
-  """
-  printer = _PatchedPrinter(including_default_value_fields,
-                            preserving_proto_field_name,
-                            strict_any_handling=strict_any_handling)
-  return printer.ToJsonString(message)
-
-
-json_format._WKTJSONMETHODS['google.protobuf.Any'] = [
-    '_BestEffortAnyMessageToJsonObject',
-    '_ConvertAnyMessage'
-]
-
-json_format._Printer._BestEffortAnyMessageToJsonObject = \
-    json_format._Printer._AnyMessageToJsonObject
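A sketch of the relaxed Any handling (the type_url below deliberately names a
type that is not loaded, so strict conversion would raise):

    from google.protobuf.any_pb2 import Any
    from adapters.common.utils import json_format as relaxed

    any_msg = Any()
    any_msg.type_url = 'type.googleapis.com/some.unloaded.Type'
    any_msg.value = b'\x08\x01'

    # With strict_any_handling=False the unknown payload is left un-packed
    # instead of raising, so best-effort serialization can proceed.
    print relaxed.MessageToDict(any_msg, strict_any_handling=False)
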
diff --git a/python/adapters/common/utils/message_queue.py b/python/adapters/common/utils/message_queue.py
deleted file mode 100644
index 2b4257a..0000000
--- a/python/adapters/common/utils/message_queue.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from twisted.internet.defer import Deferred
-from twisted.internet.defer import succeed
-
-
-class MessageQueue(object):
-    """
-    An event driven queue, similar to twisted.internet.defer.DeferredQueue
-    but which allows selective dequeuing based on a predicate function.
-    Unlike DeferredQueue, there is no limit on the backlog of waiters and
-    no limit on the queue size.
-    """
-
-    def __init__(self):
-        self.waiting = []  # tuples of (d, predicate)
-        self.queue = []  # messages piling up here if no one is waiting
-
-    def reset(self):
-        """
-        Purge all content as well as waiters (by errback-ing their entries).
-        :return: None
-        """
-        for d, _ in self.waiting:
-            d.errback(Exception('message queue reset() was called'))
-        self.waiting = []
-        self.queue = []
-
-    def _cancelGet(self, d):
-        """
-        Remove a deferred from our waiting list.
-        :param d: The deferred that has been canceled.
-        :return: None
-        """
-        for i in range(len(self.waiting)):
-            if self.waiting[i][0] is d:
-                self.waiting.pop(i)
-                return
-
-    def put(self, obj):
-        """
-        Add an object to this queue
-        :param obj: arbitrary object that will be added to the queue
-        :return: None
-        """
-
-        # if someone is waiting for this, return right away
-        for i in range(len(self.waiting)):
-            d, predicate = self.waiting[i]
-            if predicate is None or predicate(obj):
-                self.waiting.pop(i)
-                d.callback(obj)
-                return
-
-        # otherwise...
-        self.queue.append(obj)
-
-    def get(self, predicate=None):
-        """
-        Attempt to retrieve and remove an object from the queue that
-        matches the optional predicate.
-        :return: Deferred which fires with the next object available.
-        If predicate was provided, only objects for which
-        predicate(obj) is True will be considered.
-        """
-        for i in range(len(self.queue)):
-            msg = self.queue[i]
-            if predicate is None or predicate(msg):
-                self.queue.pop(i)
-                return succeed(msg)
-
-        # there were no matching entries if we got here, so we wait
-        d = Deferred(canceller=self._cancelGet)
-        self.waiting.append((d, predicate))
-        return d
-
-
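A sketch of selective dequeuing with the removed MessageQueue (the 'type'
keys are illustrative):

    from adapters.common.utils.message_queue import MessageQueue

    def handle(msg):
        print 'dequeued', msg

    q = MessageQueue()
    q.put({'type': 'stats'})
    q.put({'type': 'packet', 'payload': 'abc'})

    # Only the matching message is handed out; 'stats' stays queued.
    q.get(predicate=lambda m: m['type'] == 'packet').addCallback(handle)
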
diff --git a/python/adapters/common/utils/nethelpers.py b/python/adapters/common/utils/nethelpers.py
deleted file mode 100644
index b17aced..0000000
--- a/python/adapters/common/utils/nethelpers.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Some network related convenience functions
-"""
-
-from netifaces import AF_INET
-
-import netifaces as ni
-import netaddr
-
-
-def _get_all_interfaces():
-    m_interfaces = []
-    for iface in ni.interfaces():
-        m_interfaces.append((iface, ni.ifaddresses(iface)))
-    return m_interfaces
-
-
-def _get_my_primary_interface():
-    gateways = ni.gateways()
-    assert 'default' in gateways, \
-        ("No default gateway on host/container, "
-         "cannot determine primary interface")
-    default_gw_index = gateways['default'].keys()[0]
-    # gateways[default_gw_index] has the format (example):
-    # [('10.15.32.1', 'en0', True)]
-    interface_name = gateways[default_gw_index][0][1]
-    return interface_name
-
-
-def get_my_primary_local_ipv4(inter_core_subnet=None, ifname=None):
-    if not inter_core_subnet:
-        return _get_my_primary_local_ipv4(ifname)
-    # My IP should belong to the specified subnet
-    for iface in ni.interfaces():
-        addresses = ni.ifaddresses(iface)
-        if AF_INET in addresses:
-            m_ip = addresses[AF_INET][0]['addr']
-            _ip = netaddr.IPAddress(m_ip).value
-            m_network = netaddr.IPNetwork(inter_core_subnet)
-            if _ip >= m_network.first and _ip <= m_network.last:
-                return m_ip
-    return None
-
-
-def get_my_primary_interface(pon_subnet=None):
-    if not pon_subnet:
-        return _get_my_primary_interface()
-    # My interface should have an IP that belongs to the specified subnet
-    for iface in ni.interfaces():
-        addresses = ni.ifaddresses(iface)
-        if AF_INET in addresses:
-            m_ip = addresses[AF_INET][0]['addr']
-            m_ip = netaddr.IPAddress(m_ip).value
-            m_network = netaddr.IPNetwork(pon_subnet)
-            if m_ip >= m_network.first and m_ip <= m_network.last:
-                return iface
-    return None
-
-
-def _get_my_primary_local_ipv4(ifname=None):
-    try:
-        ifname = get_my_primary_interface() if ifname is None else ifname
-        addresses = ni.ifaddresses(ifname)
-        ipv4 = addresses[AF_INET][0]['addr']
-        return ipv4
-    except Exception as e:
-        return None
-
-if __name__ == '__main__':
-    print get_my_primary_local_ipv4()
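The subnet-aware variants were used to pin addresses to a particular network;
a sketch (both subnets below are illustrative):

    from adapters.common.utils.nethelpers import (
        get_my_primary_local_ipv4, get_my_primary_interface)

    # Address on the default-gateway interface
    print get_my_primary_local_ipv4()
    # First local IPv4 inside the given subnet, or None
    print get_my_primary_local_ipv4(inter_core_subnet='10.1.0.0/16')
    # Interface (not address) owning an IP in the PON subnet, or None
    print get_my_primary_interface(pon_subnet='192.168.0.0/24')
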
diff --git a/python/adapters/common/utils/ordered_weakvalue_dict.py b/python/adapters/common/utils/ordered_weakvalue_dict.py
deleted file mode 100644
index 9ea739a..0000000
--- a/python/adapters/common/utils/ordered_weakvalue_dict.py
+++ /dev/null
@@ -1,48 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-from _weakref import ref
-from weakref import KeyedRef
-from collections import OrderedDict
-
-
-class OrderedWeakValueDict(OrderedDict):
-    """
-    Modified OrderedDict to use weak references as values. Entries disappear
-    automatically if the referred value has no more strong references pointing
-    to it.
-
-    Warning: this is not a complete implementation, only what is needed for
-    now. See test_ordered_weakvalue_dict.py for the tested behavior.
-    """
-    def __init__(self, *args, **kw):
-        def remove(wr, selfref=ref(self)):
-            self = selfref()
-            if self is not None:
-                super(OrderedWeakValueDict, self).__delitem__(wr.key)
-        self._remove = remove
-        super(OrderedWeakValueDict, self).__init__(*args, **kw)
-
-    def __setitem__(self, key, value):
-        super(OrderedWeakValueDict, self).__setitem__(
-            key, KeyedRef(value, self._remove, key))
-
-    def __getitem__(self, key):
-        o = super(OrderedWeakValueDict, self).__getitem__(key)()
-        if o is None:
-            raise KeyError, key
-        else:
-            return o
-
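The weak-value behavior in a nutshell (a sketch; Thing is a hypothetical
class, since plain ints and strings cannot be weakly referenced):

    from adapters.common.utils.ordered_weakvalue_dict import \
        OrderedWeakValueDict

    class Thing(object):
        pass

    d = OrderedWeakValueDict()
    strong = Thing()
    d['a'] = strong
    print d['a'] is strong  # True while a strong reference exists
    del strong              # last strong ref gone; entry disappears
    # d['a'] now raises KeyError once the value has been collected
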
diff --git a/python/adapters/common/utils/registry.py b/python/adapters/common/utils/registry.py
deleted file mode 100644
index 270bd71..0000000
--- a/python/adapters/common/utils/registry.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-Simple component registry to provide centralized access to any registered
-components.
-"""
-from collections import OrderedDict
-from zope.interface import Interface
-
-
-class IComponent(Interface):
-    """
-    A Voltha Component
-    """
-
-    def start():
-        """
-        Called once the component is instantiated. Can be used for async
-        initialization.
-        :return: (None or Deferred)
-        """
-
-    def stop():
-        """
-        Called once before the component is unloaded. Can be used for async
-        cleanup operations.
-        :return: (None or Deferred)
-        """
-
-
-class Registry(object):
-
-    def __init__(self):
-        self.components = OrderedDict()
-
-    def register(self, name, component):
-        assert IComponent.providedBy(component)
-        assert name not in self.components
-        self.components[name] = component
-        return component
-
-    def unregister(self, name):
-        if name in self.components:
-            del self.components[name]
-
-    def __call__(self, name):
-        return self.components[name]
-
-    def iterate(self):
-        return self.components.values()
-
-
-# public shared registry
-registry = Registry()
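Typical registry usage, as a sketch (MyComponent is hypothetical; any object
providing IComponent will do):

    from zope.interface import implementer
    from adapters.common.utils.registry import registry, IComponent

    @implementer(IComponent)
    class MyComponent(object):
        def start(self):
            pass
        def stop(self):
            pass

    registry.register('my_component', MyComponent())
    component = registry('my_component')  # lookup by name
    for c in registry.iterate():          # all components, in order
        c.start()
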
diff --git a/python/adapters/docker/Dockerfile.adapter_ponsim_olt b/python/adapters/docker/Dockerfile.adapter_ponsim_olt
deleted file mode 100644
index 209200d..0000000
--- a/python/adapters/docker/Dockerfile.adapter_ponsim_olt
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2016 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-ARG TAG=latest
-ARG REGISTRY=
-ARG REPOSITORY=
-
-FROM ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} as protos
-FROM ${REGISTRY}${REPOSITORY}voltha-base:${TAG}
-
-MAINTAINER Voltha Community <info@opennetworking.org>
-
-# Bundle app source
-RUN mkdir /adapters && touch /adapters/__init__.py
-ENV PYTHONPATH=/adapters
-COPY common /adapters/adapters/common
-COPY kafka /adapters/adapters/kafka
-COPY ./*.py /adapters/adapters/
-#COPY pki /voltha/pki
-COPY ponsim_olt /adapters/adapters/ponsim_olt
-RUN touch /adapters/adapters/__init__.py
-
-
-# Copy in the generated GRPC proto code
-COPY --from=protos /protos/voltha /adapters/adapters/protos
-COPY --from=protos /protos/google/api /adapters/adapters/protos/third_party/google/api
-COPY protos/third_party/__init__.py /adapters/adapters/protos/third_party
-RUN touch /adapters/adapters/protos/__init__.py
-RUN touch /adapters/adapters/protos/third_party/google/__init__.py
-
-# Exposing process and default entry point
-# CMD ["python", "/adapters/ponsim_olt/main.py"]
diff --git a/python/adapters/docker/Dockerfile.adapter_ponsim_onu b/python/adapters/docker/Dockerfile.adapter_ponsim_onu
deleted file mode 100644
index d0d3e36..0000000
--- a/python/adapters/docker/Dockerfile.adapter_ponsim_onu
+++ /dev/null
@@ -1,42 +0,0 @@
-# Copyright 2016 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-ARG TAG=latest
-ARG REGISTRY=
-ARG REPOSITORY=
-
-FROM ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} as protos
-FROM ${REGISTRY}${REPOSITORY}voltha-base:${TAG}
-
-MAINTAINER Voltha Community <info@opennetworking.org>
-
-# Bundle app source
-RUN mkdir /adapters && touch /adapters/__init__.py
-ENV PYTHONPATH=/adapters
-COPY common /adapters/adapters/common
-COPY kafka /adapters/adapters/kafka
-COPY ./*.py /adapters/adapters/
-#COPY pki /voltha/pki
-COPY ponsim_onu /adapters/adapters/ponsim_onu
-RUN touch /adapters/adapters/__init__.py
-
-
-# Copy in the generated GRPC proto code
-COPY --from=protos /protos/voltha /adapters/adapters/protos
-COPY --from=protos /protos/google/api /adapters/adapters/protos/third_party/google/api
-COPY protos/third_party/__init__.py /adapters/adapters/protos/third_party
-RUN touch /adapters/adapters/protos/__init__.py
-RUN touch /adapters/adapters/protos/third_party/google/__init__.py
-
-# Exposing process and default entry point
-# CMD ["python", "/adapters/ponsim_onu/main.py"]
diff --git a/python/adapters/docker/Dockerfile.base b/python/adapters/docker/Dockerfile.base
deleted file mode 100644
index 1b912e0..0000000
--- a/python/adapters/docker/Dockerfile.base
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright 2016 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM ubuntu:xenial
-
-MAINTAINER Voltha Community <info@opennetworking.org>
-
-# Update to have latest images
-RUN apt-get update && \
-    apt-get install -y python python-pip openssl iproute2 libpcap-dev wget
-
-COPY requirements.txt /tmp/requirements.txt
-
-# pip install cython enum34 six && \
-# Install app dependencies
-RUN wget https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
-    dpkg -i *.deb && \
-    rm -f *.deb && \
-    apt-get update && \
-    apt-get install -y wget build-essential make gcc binutils python-dev libffi-dev libssl-dev git && \
-    pip install -r /tmp/requirements.txt && \
-    apt-get purge -y wget build-essential make gcc binutils python-dev libffi-dev libssl-dev git && \
-    apt-get autoremove -y
diff --git a/python/adapters/docker/Dockerfile.protoc b/python/adapters/docker/Dockerfile.protoc
deleted file mode 100644
index eef6f54..0000000
--- a/python/adapters/docker/Dockerfile.protoc
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright 2018 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-ARG REGISTRY=
-ARG PROTOC_PREFIX=/usr/local
-ARG PROTOC_LIBDIR=${PROTOC_PREFIX}/lib
-ARG PROTOC=${PROTOC_PREFIX}/bin/protoc
-ARG PROTOC_VERSION=3.3.0
-
-FROM ${REGISTRY}debian:stretch-slim
-MAINTAINER Voltha Community <info@opennetworking.org>
-
-ENV PROTOC_PREFIX=/usr/local
-ENV PROTOC_LIBDIR=${PROTOC_PREFIX}/lib
-ENV PROTOC=${PROTOC_PREFIX}/bin/protoc
-ENV PROTOC_VERSION=3.3.0
-ENV PROTOC_DOWNLOAD_PREFIX=https://github.com/google/protobuf/releases/download
-ENV PROTOC_DIR=protobuf-${PROTOC_VERSION}
-ENV PROTOC_TARBALL=protobuf-python-${PROTOC_VERSION}.tar.gz
-ENV PROTOC_DOWNLOAD_URI=${PROTOC_DOWNLOAD_PREFIX}/v${PROTOC_VERSION}/${PROTOC_TARBALL}
-
-RUN apt-get update -y && apt-get install -y wget build-essential python-dev python-pip
-RUN pip install grpcio-tools==1.3.5
-WORKDIR /build
-RUN wget -q --no-check-certificate ${PROTOC_DOWNLOAD_URI}
-RUN tar --strip-components=1 -zxf ${PROTOC_TARBALL}
-RUN ./configure --prefix=${PROTOC_PREFIX}
-RUN make install
diff --git a/python/adapters/docker/Dockerfile.protos b/python/adapters/docker/Dockerfile.protos
deleted file mode 100644
index db70d13..0000000
--- a/python/adapters/docker/Dockerfile.protos
+++ /dev/null
@@ -1,36 +0,0 @@
-# Copyright 2018 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-ARG REGISTRY=
-ARG REPOSITORY=
-ARG TAG=latest
-
-FROM ${REGISTRY}${REPOSITORY}voltha-protoc:${TAG} as builder
-MAINTAINER Voltha Community <info@opennetworking.org>
-
-COPY protos/third_party/google/api/*.proto /protos/google/api/
-COPY docker/config/Makefile.protos /protos/google/api/Makefile.protos
-COPY protos/*.proto /protos/voltha/
-COPY docker/config/Makefile.protos /protos/voltha/Makefile.protos
-
-WORKDIR /protos
-RUN make -f google/api/Makefile.protos google_api
-RUN touch /protos/google/__init__.py /protos/google/api/__init__.py
-
-WORKDIR /protos/voltha
-RUN make -f Makefile.protos build
-
-# Copy the files to a scratch-based container to minimize its size
-FROM ${REGISTRY}scratch
-COPY --from=builder /protos/ /protos/
diff --git a/python/adapters/docker/config/Makefile.protos b/python/adapters/docker/config/Makefile.protos
deleted file mode 100644
index 12ff9e3..0000000
--- a/python/adapters/docker/config/Makefile.protos
+++ /dev/null
@@ -1,59 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Makefile to build all protobuf and gRPC related artifacts
-
-default: build
-
-PROTO_FILES := $(wildcard *.proto)
-PROTO_ALL_FILES := $(PROTO_FILES) $(PROTO_GOOGLE_API)
-PROTO_PB2_FILES := $(foreach f,$(PROTO_FILES),$(subst .proto,_pb2.py,$(f)))
-PROTO_PB2_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,_pb2.py,$(f)))
-PROTO_All_PB2_C_FILES := $(foreach f,$(PROTO_ALL_FILES),$(subst .proto,_pb2.pyc,$(f)))
-PROTO_ALL_PB2_GPRC_FILES := $(foreach f,$(PROTO_ALL_FILES),$(subst .proto,_pb2_grpc.py,$(f)))
-PROTO_ALL_DESC_FILES := $(foreach f,$(PROTO_ALL_FILES),$(subst .proto,.desc,$(f)))
-
-# Google API needs to be built from within the third party directory
-#
-google_api:
-	python -m grpc.tools.protoc \
-	    -I. \
-            --python_out=. \
-            --grpc_python_out=. \
-            --descriptor_set_out=google/api/annotations.desc \
-            --include_imports \
-            --include_source_info \
-        google/api/annotations.proto google/api/http.proto
-
-build: $(PROTO_PB2_FILES)
-
-%_pb2.py: %.proto
-	python -m grpc.tools.protoc \
-                -I. \
-                -I/protos \
-                --python_out=. \
-                --grpc_python_out=. \
-                --descriptor_set_out=$(basename $<).desc \
-                --include_imports \
-                --include_source_info \
-                $<
-
-clean:
-	rm -f $(PROTO_PB2_FILES) \
-		$(PROTO_ALL_DESC_FILES) \
-		$(PROTO_ALL_PB2_GPRC_FILES) \
-		$(PROTO_All_PB2_C_FILES) \
-		$(PROTO_PB2_GOOGLE_API)
diff --git a/python/adapters/env.sh b/python/adapters/env.sh
deleted file mode 100644
index f4f9f97..0000000
--- a/python/adapters/env.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# sourcing this file is needed to make local development and integration testing work
-export VOLTHA_BASE=$PWD
-
-# load local python virtualenv if exists, otherwise create it
-VENVDIR="venv-$(uname -s | tr '[:upper:]' '[:lower:]')"
-if [ ! -e "$VENVDIR/.built" ]; then
-    echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
-    echo "Initializing OS-appropriate virtual env."
-    echo "This will take a few minutes."
-    echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
-    make venv
-fi
-. $VENVDIR/bin/activate
-
-# add top-level voltha dir to pythonpath
-export PYTHONPATH=$VOLTHA_BASE/$VENVDIR/lib/python2.7/site-packages:$PYTHONPATH:$VOLTHA_BASE:$VOLTHA_BASE/protos/third_party
diff --git a/python/adapters/iadapter.py b/python/adapters/iadapter.py
index ee4d116..04cb303 100644
--- a/python/adapters/iadapter.py
+++ b/python/adapters/iadapter.py
@@ -22,13 +22,13 @@
 from twisted.internet import reactor
 from zope.interface import implementer
 
-from adapters.interface import IAdapterInterface
-from adapters.protos.adapter_pb2 import Adapter
-from adapters.protos.adapter_pb2 import AdapterConfig
-from adapters.protos.common_pb2 import AdminState
-from adapters.protos.common_pb2 import LogLevel
-from adapters.protos.device_pb2 import DeviceType, DeviceTypes
-from adapters.protos.health_pb2 import HealthStatus
+from interface import IAdapterInterface
+from python.protos.adapter_pb2 import Adapter
+from python.protos.adapter_pb2 import AdapterConfig
+from python.protos.common_pb2 import AdminState
+from python.protos.common_pb2 import LogLevel
+from python.protos.device_pb2 import DeviceType, DeviceTypes
+from python.protos.health_pb2 import HealthStatus
 
 log = structlog.get_logger()
 
@@ -273,7 +273,7 @@
         handler.send_proxied_message(proxy_address, msg)
 
     def process_inter_adapter_message(self, msg):
-        log.info('process-inter-adapter-message', msg=msg)
+        log.debug('process-inter-adapter-message', msg=msg)
         # Unpack the header to know which device needs to handle this message
         handler = None
         if msg.header.proxy_device_id:
@@ -286,18 +286,11 @@
         if handler:
             reactor.callLater(0, handler.process_inter_adapter_message, msg)
 
-    def receive_packet_out(self, logical_device_id, egress_port_no, msg):
-        def ldi_to_di(ldi):
-            di = self.logical_device_id_to_root_device_id.get(ldi)
-            if di is None:
-                logical_device = self.core_proxy.get_logical_device(ldi)
-                di = logical_device.root_device_id
-                self.logical_device_id_to_root_device_id[ldi] = di
-            return di
-
-        device_id = ldi_to_di(logical_device_id)
+    def receive_packet_out(self, device_id, egress_port_no, msg):
+        log.info('receive_packet_out', device_id=device_id,
+                 egress_port=egress_port_no, msg=msg)
         handler = self.devices_handlers[device_id]
-        handler.packet_out(egress_port_no, msg)
+        handler.packet_out(egress_port_no, msg.data)
 
 
 """
diff --git a/python/adapters/kafka/adapter_proxy.py b/python/adapters/kafka/adapter_proxy.py
index fad1093..769de80 100644
--- a/python/adapters/kafka/adapter_proxy.py
+++ b/python/adapters/kafka/adapter_proxy.py
@@ -21,9 +21,9 @@
 import structlog
 from uuid import uuid4
 from twisted.internet.defer import inlineCallbacks, returnValue
-from adapters.kafka.container_proxy import ContainerProxy
-from adapters.protos import third_party
-from adapters.protos.core_adapter_pb2 import InterAdapterHeader, \
+from container_proxy import ContainerProxy
+from python.protos import third_party
+from python.protos.core_adapter_pb2 import InterAdapterHeader, \
     InterAdapterMessage
 import time
 
diff --git a/python/adapters/kafka/adapter_request_facade.py b/python/adapters/kafka/adapter_request_facade.py
index 67f7869..cbae56d 100644
--- a/python/adapters/kafka/adapter_request_facade.py
+++ b/python/adapters/kafka/adapter_request_facade.py
@@ -22,11 +22,11 @@
 from twisted.internet.defer import inlineCallbacks
 from zope.interface import implementer
 
-from adapters.interface import IAdapterInterface
-from adapters.protos.core_adapter_pb2 import IntType, InterAdapterMessage
-from adapters.protos.device_pb2 import Device
-from adapters.protos.openflow_13_pb2 import FlowChanges, FlowGroups, Flows, \
-    FlowGroupChanges
+from python.adapters.interface import IAdapterInterface
+from python.protos.core_adapter_pb2 import IntType, InterAdapterMessage, StrType, Error, ErrorCode
+from python.protos.device_pb2 import Device
+from python.protos.openflow_13_pb2 import FlowChanges, FlowGroups, Flows, \
+    FlowGroupChanges, ofp_packet_out
 
 
 class MacAddressError(BaseException):
@@ -68,7 +68,8 @@
             device.Unpack(d)
             return True, self.adapter.adopt_device(d)
         else:
-            return False, d
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
 
     def get_ofp_device_info(self, device):
         d = Device()
@@ -76,17 +77,22 @@
             device.Unpack(d)
             return True, self.adapter.get_ofp_device_info(d)
         else:
-            return False, d
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
 
     def get_ofp_port_info(self, device, port_no):
         d = Device()
         if device:
             device.Unpack(d)
         else:
-            return (False, d)
-
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
         p = IntType()
-        port_no.Unpack(p)
+        if port_no:
+            port_no.Unpack(p)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="port-no-invalid")
 
         return True, self.adapter.get_ofp_port_info(d, p.val)
 
@@ -102,7 +108,8 @@
             device.Unpack(d)
             return True, self.adapter.disable_device(d)
         else:
-            return False, d
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
 
     def reenable_device(self, device):
         d = Device()
@@ -110,7 +117,8 @@
             device.Unpack(d)
             return True, self.adapter.reenable_device(d)
         else:
-            return False, d
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
 
     def reboot_device(self, device):
         d = Device()
@@ -118,7 +126,8 @@
             device.Unpack(d)
             return (True, self.adapter.reboot_device(d))
         else:
-            return (False, d)
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
 
     def download_image(self, device, request):
         return self.adapter.download_image(device, request)
@@ -144,7 +153,8 @@
             device.Unpack(d)
             return (True, self.adapter.delete_device(d))
         else:
-            return (False, d)
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
 
     def get_device_details(self, device):
         return self.adapter.get_device_details(device)
@@ -154,8 +164,8 @@
         if device:
             device.Unpack(d)
         else:
-            return (False, d)
-
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
         f = Flows()
         if flows:
             flows.Unpack(f)
@@ -171,8 +181,8 @@
         if device:
             device.Unpack(d)
         else:
-            return (False, d)
-
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="device-invalid")
         f = FlowChanges()
         if flow_changes:
             flow_changes.Unpack(f)
@@ -194,6 +204,33 @@
         if msg:
             msg.Unpack(m)
         else:
-            return (False, m)
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="msg-invalid")
 
         return (True, self.adapter.process_inter_adapter_message(m))
+
+
+    def receive_packet_out(self, deviceId, outPort, packet):
+        d_id = StrType()
+        if deviceId:
+            deviceId.Unpack(d_id)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="deviceid-invalid")
+
+        op = IntType()
+        if outPort:
+            outPort.Unpack(op)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="outport-invalid")
+
+        p = ofp_packet_out()
+        if packet:
+            packet.Unpack(p)
+        else:
+            return False, Error(code=ErrorCode.INVALID_PARAMETERS,
+                                reason="packet-invalid")
+
+        return (True, self.adapter.receive_packet_out(d_id.val, op.val, p))
+
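The new facade method expects each argument packed as a protobuf Any; a
sketch of what the core side would send (the field values are illustrative):

    from google.protobuf.any_pb2 import Any
    from python.protos.core_adapter_pb2 import StrType, IntType

    device_id = Any()
    device_id.Pack(StrType(val='olt-0001'))
    out_port = Any()
    out_port.Pack(IntType(val=1))
    # 'packet' is an Any-packed ofp_packet_out built the same way;
    # receive_packet_out() then Unpack()s each argument before dispatch.
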
diff --git a/python/adapters/kafka/container_proxy.py b/python/adapters/kafka/container_proxy.py
index 79918cd..8c4e828 100644
--- a/python/adapters/kafka/container_proxy.py
+++ b/python/adapters/kafka/container_proxy.py
@@ -23,9 +23,9 @@
 from twisted.python import failure
 from zope.interface import implementer
 
-from adapters.common.utils.deferred_utils import DeferredWithTimeout, \
+from python.common.utils.deferred_utils import DeferredWithTimeout, \
     TimeOutError
-from adapters.common.utils.registry import IComponent
+from python.common.utils.registry import IComponent
 
 log = structlog.get_logger()
 
diff --git a/python/adapters/kafka/core_proxy.py b/python/adapters/kafka/core_proxy.py
index cc3f081..4bab30d 100644
--- a/python/adapters/kafka/core_proxy.py
+++ b/python/adapters/kafka/core_proxy.py
@@ -21,11 +21,11 @@
 from google.protobuf.message import Message
 from twisted.internet.defer import inlineCallbacks, returnValue
 
-from adapters.kafka.container_proxy import ContainerProxy
-from adapters.protos.common_pb2 import ID, ConnectStatus, OperStatus
-from adapters.protos.core_adapter_pb2 import StrType, BoolType, IntType
-from adapters.protos.device_pb2 import Device, Ports
-from adapters.protos.voltha_pb2 import CoreInstance
+from container_proxy import ContainerProxy
+from python.protos.common_pb2 import ID, ConnectStatus, OperStatus
+from python.protos.core_adapter_pb2 import StrType, BoolType, IntType, Packet
+from python.protos.device_pb2 import Device, Ports
+from python.protos.voltha_pb2 import CoreInstance
 
 log = structlog.get_logger()
 
@@ -243,7 +243,8 @@
         b = BoolType()
         b.val = init
         res = yield self.invoke(rpc="DevicePMConfigUpdate",
-                                device_pm_config=device_pm_config, init=b)
+                                device_pm_config=device_pm_config,
+                                init=b)
         returnValue(res)
 
     @ContainerProxy.wrap_request(None)
@@ -252,7 +253,8 @@
         log.debug("port_created")
         proto_id = ID()
         proto_id.id = device_id
-        res = yield self.invoke(rpc="PortCreated", device_id=proto_id,
+        res = yield self.invoke(rpc="PortCreated",
+                                device_id=proto_id,
                                 port=port)
         returnValue(res)
 
@@ -274,5 +276,16 @@
     def image_download_deleted(img_dnld):
         raise NotImplementedError()
 
-    def packet_in(device_id, egress_port_no, packet):
-        raise NotImplementedError()
+    @ContainerProxy.wrap_request(None)
+    @inlineCallbacks
+    def send_packet_in(self, device_id, port, packet):
+        log.debug("send_packet_in")
+        proto_id = ID()
+        proto_id.id = device_id
+        p = IntType()
+        p.val = port
+        pac = Packet()
+        pac.payload = packet
+        res = yield self.invoke(rpc="PacketIn",
+                                device_id=proto_id,
+                                port=p,
+                                packet=pac)
+        returnValue(res)
diff --git a/python/adapters/kafka/event_bus_publisher.py b/python/adapters/kafka/event_bus_publisher.py
index 011fdea..89b3385 100644
--- a/python/adapters/kafka/event_bus_publisher.py
+++ b/python/adapters/kafka/event_bus_publisher.py
@@ -25,7 +25,7 @@
 from google.protobuf.message import Message
 from simplejson import dumps
 
-from adapters.common.event_bus import EventBusClient
+from python.common.event_bus import EventBusClient
 
 log = structlog.get_logger()
 
diff --git a/python/adapters/kafka/kafka_inter_container_library.py b/python/adapters/kafka/kafka_inter_container_library.py
index 3f6f5eb..1d2b05c 100644
--- a/python/adapters/kafka/kafka_inter_container_library.py
+++ b/python/adapters/kafka/kafka_inter_container_library.py
@@ -25,10 +25,10 @@
     DeferredQueue, gatherResults
 from zope.interface import implementer
 
-from adapters.common.utils import asleep
-from adapters.common.utils.registry import IComponent
-from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
-from adapters.protos.core_adapter_pb2 import MessageType, Argument, \
+from python.common.utils import asleep
+from python.common.utils.registry import IComponent
+from kafka_proxy import KafkaProxy, get_kafka_proxy
+from python.protos.core_adapter_pb2 import MessageType, Argument, \
     InterContainerRequestBody, InterContainerMessage, Header, \
     InterContainerResponseBody
 
diff --git a/python/adapters/kafka/kafka_proxy.py b/python/adapters/kafka/kafka_proxy.py
index c11caa7..6dcb10f 100644
--- a/python/adapters/kafka/kafka_proxy.py
+++ b/python/adapters/kafka/kafka_proxy.py
@@ -23,9 +23,9 @@
 from twisted.internet.defer import inlineCallbacks, returnValue
 from zope.interface import implementer
 
-from adapters.common.utils.consulhelpers import get_endpoint_from_consul
-from adapters.common.utils.registry import IComponent
-from adapters.kafka.event_bus_publisher import EventBusPublisher
+from python.common.utils.consulhelpers import get_endpoint_from_consul
+from python.common.utils.registry import IComponent
+from event_bus_publisher import EventBusPublisher
 
 log = get_logger()
 
diff --git a/python/adapters/ponsim_olt/main.py b/python/adapters/ponsim_olt/main.py
index 569e284..09b78fc 100755
--- a/python/adapters/ponsim_olt/main.py
+++ b/python/adapters/ponsim_olt/main.py
@@ -29,22 +29,22 @@
 from twisted.internet.task import LoopingCall
 from zope.interface import implementer
 
-from adapters.common.structlog_setup import setup_logging, update_logging
-from adapters.common.utils.asleep import asleep
-from adapters.common.utils.deferred_utils import TimeOutError
-from adapters.common.utils.dockerhelpers import get_my_containers_name
-from adapters.common.utils.nethelpers import get_my_primary_local_ipv4, \
+from python.common.structlog_setup import setup_logging, update_logging
+from python.common.utils.asleep import asleep
+from python.common.utils.deferred_utils import TimeOutError
+from python.common.utils.dockerhelpers import get_my_containers_name
+from python.common.utils.nethelpers import get_my_primary_local_ipv4, \
     get_my_primary_interface
-from adapters.common.utils.registry import registry, IComponent
-from adapters.kafka.adapter_proxy import AdapterProxy
-from adapters.kafka.adapter_request_facade import AdapterRequestFacade
-from adapters.kafka.core_proxy import CoreProxy
-from adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
+from python.common.utils.registry import registry, IComponent
+from python.adapters.kafka.adapter_proxy import AdapterProxy
+from python.adapters.kafka.adapter_request_facade import AdapterRequestFacade
+from python.adapters.kafka.core_proxy import CoreProxy
+from python.adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
     get_messaging_proxy
-from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
-from adapters.ponsim_olt.ponsim_olt import PonSimOltAdapter
-from adapters.protos import third_party
-from adapters.protos.adapter_pb2 import AdapterConfig
+from python.adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from ponsim_olt import PonSimOltAdapter
+from python.protos import third_party
+from python.protos.adapter_pb2 import AdapterConfig
 
 _ = third_party
 
diff --git a/python/adapters/ponsim_olt/ponsim_olt.py b/python/adapters/ponsim_olt/ponsim_olt.py
index 52fb63b..df834e5 100644
--- a/python/adapters/ponsim_olt/ponsim_olt.py
+++ b/python/adapters/ponsim_olt/ponsim_olt.py
@@ -23,6 +23,8 @@
 import structlog
 from google.protobuf.empty_pb2 import Empty
 from google.protobuf.json_format import MessageToDict
+from scapy.layers.inet import Raw
+import json
 from google.protobuf.message import Message
 from grpc._channel import _Rendezvous
 from scapy.layers.l2 import Ether, Dot1Q
@@ -31,25 +33,25 @@
 from twisted.internet.defer import inlineCallbacks, returnValue
 from twisted.internet.task import LoopingCall
 
-from adapters.common.frameio.frameio import BpfProgramFilter, hexify
-from adapters.common.utils.asleep import asleep
-from adapters.common.utils.registry import registry
-from adapters.iadapter import OltAdapter
-from adapters.kafka.kafka_proxy import get_kafka_proxy
-from adapters.protos import ponsim_pb2
-from adapters.protos import third_party
-from adapters.protos.common_pb2 import OperStatus, ConnectStatus
-from adapters.protos.core_adapter_pb2 import SwitchCapability, PortCapability, \
+from python.adapters.common.frameio.frameio import BpfProgramFilter, hexify
+from python.common.utils.asleep import asleep
+from python.common.utils.registry import registry
+from python.adapters.iadapter import OltAdapter
+from python.adapters.kafka.kafka_proxy import get_kafka_proxy
+from python.protos import ponsim_pb2
+from python.protos import third_party
+from python.protos.common_pb2 import OperStatus, ConnectStatus
+from python.protos.core_adapter_pb2 import SwitchCapability, PortCapability, \
     InterAdapterMessageType, InterAdapterResponseBody
-from adapters.protos.device_pb2 import Port, PmConfig, PmConfigs
-from adapters.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
-from adapters.protos.logical_device_pb2 import LogicalPort
-from adapters.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+from python.protos.device_pb2 import Port, PmConfig, PmConfigs
+from python.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
+from python.protos.logical_device_pb2 import LogicalPort
+from python.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
     OFPPF_1GB_FD, \
     OFPC_GROUP_STATS, OFPC_PORT_STATS, OFPC_TABLE_STATS, OFPC_FLOW_STATS, \
     ofp_switch_features, ofp_desc
-from adapters.protos.openflow_13_pb2 import ofp_port
-from adapters.protos.ponsim_pb2 import FlowTable, PonSimFrame, PonSimMetricsRequest
+from python.protos.openflow_13_pb2 import ofp_port
+from python.protos.ponsim_pb2 import FlowTable, PonSimFrame, PonSimMetricsRequest
 
 _ = third_party
 log = structlog.get_logger()
@@ -157,33 +159,11 @@
         self.device = device
         self.lc = None
 
+    # TODO: Implement code to send to the kafka cluster directly instead of
+    # going through the voltha core.
     def send_alarm(self, context_data, alarm_data):
-        try:
-            current_context = {}
-            for key, value in context_data.__dict__.items():
-                current_context[key] = str(value)
-
-            alarm_event = self.adapter.adapter_agent.create_alarm(
-                resource_id=self.device.id,
-                description="{}.{} - {}".format(self.adapter.name,
-                                                self.device.id,
-                                                alarm_data[
-                                                    'description']) if 'description' in alarm_data else None,
-                type=alarm_data['type'] if 'type' in alarm_data else None,
-                category=alarm_data[
-                    'category'] if 'category' in alarm_data else None,
-                severity=alarm_data[
-                    'severity'] if 'severity' in alarm_data else None,
-                state=alarm_data['state'] if 'state' in alarm_data else None,
-                raised_ts=alarm_data['ts'] if 'ts' in alarm_data else 0,
-                context=current_context
-            )
-
-            self.adapter.adapter_agent.submit_alarm(self.device.id,
-                                                    alarm_event)
-
-        except Exception as e:
-            log.exception('failed-to-send-alarm', e=e)
+        log.debug("send-alarm-not-implemented")
+        return
 
 
 class PonSimOltAdapter(OltAdapter):
@@ -379,6 +359,27 @@
     def reconcile(self, device):
         self.log.info('reconciling-OLT-device')
 
+    def _rcv_frame(self, frame):
+        pkt = Ether(frame)
+
+        if pkt.haslayer(Dot1Q):
+            outer_shim = pkt.getlayer(Dot1Q)
+
+            if isinstance(outer_shim.payload, Dot1Q):
+                inner_shim = outer_shim.payload
+                cvid = inner_shim.vlan
+                popped_frame = (
+                        Ether(src=pkt.src, dst=pkt.dst, type=inner_shim.type) /
+                        inner_shim.payload
+                )
+                self.log.info('sending-packet-in', device_id=self.device_id, port=cvid)
+                self.core_proxy.send_packet_in(device_id=self.device_id,
+                                               port=cvid,
+                                               packet=str(popped_frame))
+            elif pkt.haslayer(Raw):
+                raw_data = json.loads(pkt.getlayer(Raw).load)
+                self.alarms.send_alarm(self, raw_data)
+
     @inlineCallbacks
     def rcv_grpc(self):
         """
@@ -504,7 +505,7 @@
         out_port = self.nni_port.port_no if egress_port == self.nni_port.port_no else 1
 
         # send over grpc stream
-        stub = ponsim_pb2.PonSimStub(self.get_channel())
+        stub = ponsim_pb2.PonSimStub(self.channel)
         frame = PonSimFrame(id=self.device_id, payload=str(out_pkt),
                             out_port=out_port)
         stub.SendFrame(frame)
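
Note: the _rcv_frame handler added above pops the outer 802.1Q tag of a
double-tagged (QinQ) frame and sends the popped frame to the core with the
inner C-VID as the packet-in port. A standalone sketch of that pop using
only scapy (Python 2, scapy 2.3.3 as pinned in requirements.txt); the frame
layout here is an assumption for illustration, not taken from ponsim:

    from scapy.layers.l2 import Ether, Dot1Q

    # Build a double-tagged frame: outer S-VLAN 100, inner C-VLAN 42.
    frame = (Ether(src='02:00:00:00:00:01', dst='02:00:00:00:00:02') /
             Dot1Q(vlan=100) / Dot1Q(vlan=42) / 'payload')

    pkt = Ether(bytes(frame))         # re-parse raw bytes, as _rcv_frame does
    outer_shim = pkt.getlayer(Dot1Q)  # outermost 802.1Q shim
    if isinstance(outer_shim.payload, Dot1Q):
        inner_shim = outer_shim.payload
        cvid = inner_shim.vlan        # 42; doubles as the packet-in port
        popped_frame = (Ether(src=pkt.src, dst=pkt.dst, type=inner_shim.type) /
                        inner_shim.payload)
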
diff --git a/python/adapters/ponsim_onu/main.py b/python/adapters/ponsim_onu/main.py
index 3f18e50..d6418e9 100755
--- a/python/adapters/ponsim_onu/main.py
+++ b/python/adapters/ponsim_onu/main.py
@@ -29,22 +29,22 @@
 from twisted.internet.task import LoopingCall
 from zope.interface import implementer
 
-from adapters.common.structlog_setup import setup_logging, update_logging
-from adapters.common.utils.asleep import asleep
-from adapters.common.utils.deferred_utils import TimeOutError
-from adapters.common.utils.dockerhelpers import get_my_containers_name
-from adapters.common.utils.nethelpers import get_my_primary_local_ipv4, \
+from python.common.structlog_setup import setup_logging, update_logging
+from python.common.utils.asleep import asleep
+from python.common.utils.deferred_utils import TimeOutError
+from python.common.utils.dockerhelpers import get_my_containers_name
+from python.common.utils.nethelpers import get_my_primary_local_ipv4, \
     get_my_primary_interface
-from adapters.common.utils.registry import registry, IComponent
-from adapters.kafka.adapter_proxy import AdapterProxy
-from adapters.kafka.adapter_request_facade import AdapterRequestFacade
-from adapters.kafka.core_proxy import CoreProxy
-from adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
+from python.common.utils.registry import registry, IComponent
+from python.adapters.kafka.adapter_proxy import AdapterProxy
+from python.adapters.kafka.adapter_request_facade import AdapterRequestFacade
+from python.adapters.kafka.core_proxy import CoreProxy
+from python.adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
     get_messaging_proxy
-from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
-from adapters.ponsim_onu.ponsim_onu import PonSimOnuAdapter
-from adapters.protos import third_party
-from adapters.protos.adapter_pb2 import AdapterConfig
+from python.adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from ponsim_onu import PonSimOnuAdapter
+from python.protos import third_party
+from python.protos.adapter_pb2 import AdapterConfig
 
 _ = third_party
 
diff --git a/python/adapters/ponsim_onu/ponsim_onu.py b/python/adapters/ponsim_onu/ponsim_onu.py
index e15d0a9..eb4d716 100644
--- a/python/adapters/ponsim_onu/ponsim_onu.py
+++ b/python/adapters/ponsim_onu/ponsim_onu.py
@@ -29,20 +29,20 @@
     returnValue, Deferred
 from twisted.internet.task import LoopingCall
 
-from adapters.common.utils.asleep import asleep
-from adapters.iadapter import OnuAdapter
-from adapters.kafka.kafka_proxy import get_kafka_proxy
-from adapters.protos import third_party
-from adapters.protos.common_pb2 import OperStatus, ConnectStatus, AdminState
-from adapters.protos.core_adapter_pb2 import PortCapability, \
+from python.common.utils.asleep import asleep
+from python.adapters.iadapter import OnuAdapter
+from python.adapters.kafka.kafka_proxy import get_kafka_proxy
+from python.protos import third_party
+from python.protos.common_pb2 import OperStatus, ConnectStatus, AdminState
+from python.protos.core_adapter_pb2 import PortCapability, \
     InterAdapterMessageType, InterAdapterResponseBody
-from adapters.protos.device_pb2 import Port, PmConfig, PmConfigs
-from adapters.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
-from adapters.protos.logical_device_pb2 import LogicalPort
-from adapters.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+from python.protos.device_pb2 import Port, PmConfig, PmConfigs
+from python.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
+from python.protos.logical_device_pb2 import LogicalPort
+from python.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
     OFPPF_1GB_FD
-from adapters.protos.openflow_13_pb2 import ofp_port
-from adapters.protos.ponsim_pb2 import FlowTable, PonSimMetricsRequest, PonSimMetrics
+from python.protos.openflow_13_pb2 import ofp_port
+from python.protos.ponsim_pb2 import FlowTable, PonSimMetricsRequest, PonSimMetrics
 
 _ = third_party
 log = structlog.get_logger()
diff --git a/python/adapters/protos/Makefile b/python/adapters/protos/Makefile
deleted file mode 100644
index 0fad970..0000000
--- a/python/adapters/protos/Makefile
+++ /dev/null
@@ -1,101 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Makefile to build all protobuf and gRPC related artifacts
-
-default: third_party build
-
-PROTO_FILES := $(wildcard ../../protos/*.proto)
-PROTO_GOOGLE_API := $(wildcard third_party/google/api/*.proto)
-PROTO_ALL_FILES := $(PROTO_FILES) $(PROTO_GOOGLE_API)
-PROTO_PB2_FILES := $(foreach f,$(PROTO_FILES),$(subst .proto,_pb2.py,$(f)))
-PROTO_PB2_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,_pb2.py,$(f)))
-PROTO_PB2_GRPC_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,_pb2_grpc.py,$(f)))
-PROTO_DESC_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,.desc,$(f)))
-
-PROTOC_PREFIX := /usr/local
-PROTOC_LIBDIR := $(PROTOC_PREFIX)/lib
-
-PROTOC := $(PROTOC_PREFIX)/bin/protoc
-
-PROTOC_VERSION := "3.3.0"
-PROTOC_DOWNLOAD_PREFIX := "https://github.com/google/protobuf/releases/download"
-PROTOC_DIR := protobuf-$(PROTOC_VERSION)
-PROTOC_TARBALL := protobuf-python-$(PROTOC_VERSION).tar.gz
-PROTOC_DOWNLOAD_URI := $(PROTOC_DOWNLOAD_PREFIX)/v$(PROTOC_VERSION)/$(PROTOC_TARBALL)
-PROTOC_BUILD_TMP_DIR := "/tmp/protobuf-build-$(shell uname -s | tr '[:upper:]' '[:lower:]')"
-
-# Google API needs to be built from within the third party directory
-#
-third_party: google_api
-google_api:
-	@echo "Building protocol buffer artifacts from third_party google api"
-	cd third_party ; \
-	env LD_LIBRARY_PATH=$(PROTOC_LIBDIR) python -m grpc.tools.protoc \
-	    -I. \
-	    --python_out=. \
-	    --grpc_python_out=. \
-	    --descriptor_set_out=google/api/annotations.desc \
-	    --include_imports \
-	    --include_source_info \
-        google/api/annotations.proto google/api/http.proto
-
-build: $(PROTOC) $(PROTO_PB2_FILES)
-
-%_pb2.py: %.proto Makefile
-	@echo "Building protocol buffer artifacts from $<"
-	env LD_LIBRARY_PATH=$(PROTOC_LIBDIR) python -m grpc.tools.protoc \
-	    -I../../protos \
-	    -I./third_party \
-	    --python_out=. \
-	    --grpc_python_out=. \
-	    --descriptor_set_out=./$(basename $(notdir $<)).desc \
-	    --include_imports \
-	    --include_source_info \
-	    $<
-
-clean:
-	rm -f *.desc *_pb2* \
-		$(PROTO_PB2_GOOGLE_API) \
-		$(PROTO_PB2_GRPC_GOOGLE_API)\
-		$(PROTO_DESC_GOOGLE_API)
-
-$(PROTOC):
-	@echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
-	@echo "It looks like you don't have protocol buffer tools installed."
-	@echo "To install the protocol buffer toolchain, you can run:"
-	@echo "    make install-protoc"
-	@echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
-
-install-protoc: $(PROTOC)
-	@echo "Downloading and installing protocol buffer support."
-	@echo "Installation will require sodo priviledges"
-	@echo "This will take a few minutes."
-	mkdir -p $(PROTOC_BUILD_TMP_DIR)
-	@echo "We ask for sudo credentials now so we can install at the end"; \
-	sudo echo "Thanks"; \
-	    cd $(PROTOC_BUILD_TMP_DIR); \
-	    wget $(PROTOC_DOWNLOAD_URI); \
-	    tar xzvf $(PROTOC_TARBALL); \
-	    cd $(PROTOC_DIR); \
-	    ./configure --prefix=$(PROTOC_PREFIX); \
-	    make; \
-	    sudo make install
-
-uninstall-protoc:
-	cd $(PROTOC_BUILD_TMP_DIR)/$(PROTOC_DIR); \
-	    sudo make uninstall
-
diff --git a/python/adapters/protos/__init__.py b/python/adapters/protos/__init__.py
deleted file mode 100644
index cfcdc97..0000000
--- a/python/adapters/protos/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
\ No newline at end of file
diff --git a/python/adapters/protos/third_party/__init__.py b/python/adapters/protos/third_party/__init__.py
deleted file mode 100644
index 2740afe..0000000
--- a/python/adapters/protos/third_party/__init__.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# Copyright 2017 the original author or authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-"""
-This helps loading http_pb2 and annotations_pb2.
-Without this, the Python importer will not be able to process the lines:
-from google.api import http_pb2 or
-from google.api import annotations_pb2
-(Without importing these, the protobuf loader will not recognize http options
-in the protobuf definitions.)
-"""
-
-from importlib import import_module
-import os
-import sys
-
-
-class GoogleApiImporter(object):
-
-    def find_module(self, full_name, path=None):
-        if full_name == 'google.api':
-            self.path = [os.path.dirname(__file__)]
-            return self
-
-    def load_module(self, name):
-        if name in sys.modules:
-            return sys.modules[name]
-        full_name = 'adapters.protos.third_party.' + name
-        import_module(full_name)
-        module = sys.modules[full_name]
-        sys.modules[name] = module
-        return module
-
-
-sys.meta_path.append(GoogleApiImporter())
-try:
-    from google.api import http_pb2, annotations_pb2
-    _ = http_pb2, annotations_pb2
-except AssertionError:
-    pass
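
Note: the deleted third_party/__init__.py above registered a sys.meta_path
hook that aliased 'google.api' onto the generated stubs under
adapters.protos.third_party. If the same trick is still needed after this
cleanup, a re-rooted sketch would look like the following; the
python.protos.third_party target is an assumption about the new layout,
not something this commit shows:

    import os
    import sys
    from importlib import import_module

    class GoogleApiImporter(object):

        def find_module(self, full_name, path=None):
            # Claim only the 'google.api' package; defer everything else.
            if full_name == 'google.api':
                self.path = [os.path.dirname(__file__)]
                return self

        def load_module(self, name):
            if name in sys.modules:
                return sys.modules[name]
            # Load the generated stubs from the relocated package root and
            # alias them under the 'google.api' name the protos import.
            full_name = 'python.protos.third_party.' + name
            import_module(full_name)
            module = sys.modules[full_name]
            sys.modules[name] = module
            return module

    sys.meta_path.append(GoogleApiImporter())
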
diff --git a/python/adapters/protos/third_party/google/LICENSE b/python/adapters/protos/third_party/google/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/python/adapters/protos/third_party/google/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/python/adapters/protos/third_party/google/__init__.py b/python/adapters/protos/third_party/google/__init__.py
deleted file mode 100644
index b0fb0b2..0000000
--- a/python/adapters/protos/third_party/google/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/python/adapters/protos/third_party/google/api/__init__.py b/python/adapters/protos/third_party/google/api/__init__.py
deleted file mode 100644
index b0fb0b2..0000000
--- a/python/adapters/protos/third_party/google/api/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2017-present Open Networking Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/python/adapters/protos/third_party/google/api/annotations.proto b/python/adapters/protos/third_party/google/api/annotations.proto
deleted file mode 100644
index cbd18b8..0000000
--- a/python/adapters/protos/third_party/google/api/annotations.proto
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2015, Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.api;
-
-import "google/api/http.proto";
-import "google/protobuf/descriptor.proto";
-
-option java_multiple_files = true;
-option java_outer_classname = "AnnotationsProto";
-option java_package = "com.google.api";
-
-extend google.protobuf.MethodOptions {
-  // See `HttpRule`.
-  HttpRule http = 72295728;
-}
diff --git a/python/adapters/protos/third_party/google/api/http.proto b/python/adapters/protos/third_party/google/api/http.proto
deleted file mode 100644
index ce07aa1..0000000
--- a/python/adapters/protos/third_party/google/api/http.proto
+++ /dev/null
@@ -1,127 +0,0 @@
-// Copyright (c) 2015, Google Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.api;
-
-option java_multiple_files = true;
-option java_outer_classname = "HttpProto";
-option java_package = "com.google.api";
-
-
-// `HttpRule` defines the mapping of an RPC method to one or more HTTP REST API
-// methods. The mapping determines what portions of the request message are
-// populated from the path, query parameters, or body of the HTTP request.  The
-// mapping is typically specified as an `google.api.http` annotation, see
-// "google/api/annotations.proto" for details.
-//
-// The mapping consists of a mandatory field specifying a path template and an
-// optional `body` field specifying what data is represented in the HTTP request
-// body. The field name for the path indicates the HTTP method. Example:
-//
-// ```
-// package google.storage.v2;
-//
-// import "google/api/annotations.proto";
-//
-// service Storage {
-//   rpc CreateObject(CreateObjectRequest) returns (Object) {
-//     option (google.api.http) {
-//       post: "/v2/{bucket_name=buckets/*}/objects"
-//       body: "object"
-//     };
-//   };
-// }
-// ```
-//
-// Here `bucket_name` and `object` bind to fields of the request message
-// `CreateObjectRequest`.
-//
-// The rules for mapping HTTP path, query parameters, and body fields
-// to the request message are as follows:
-//
-// 1. The `body` field specifies either `*` or a field path, or is
-//    omitted. If omitted, it assumes there is no HTTP body.
-// 2. Leaf fields (recursive expansion of nested messages in the
-//    request) can be classified into three types:
-//     (a) Matched in the URL template.
-//     (b) Covered by body (if body is `*`, everything except (a) fields;
-//         else everything under the body field)
-//     (c) All other fields.
-// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
-// 4. Any body sent with an HTTP request can contain only (b) fields.
-//
-// The syntax of the path template is as follows:
-//
-//     Template = "/" Segments [ Verb ] ;
-//     Segments = Segment { "/" Segment } ;
-//     Segment  = "*" | "**" | LITERAL | Variable ;
-//     Variable = "{" FieldPath [ "=" Segments ] "}" ;
-//     FieldPath = IDENT { "." IDENT } ;
-//     Verb     = ":" LITERAL ;
-//
-// `*` matches a single path component, `**` zero or more path components, and
-// `LITERAL` a constant.  A `Variable` can match an entire path as specified
-// again by a template; this nested template must not contain further variables.
-// If no template is given with a variable, it matches a single path component.
-// The notation `{var}` is henceforth equivalent to `{var=*}`.
-//
-// Use CustomHttpPattern to specify any HTTP method that is not included in the
-// pattern field, such as HEAD, or "*" to leave the HTTP method unspecified for
-// a given URL path rule. The wild-card rule is useful for services that provide
-// content to Web (HTML) clients.
-message HttpRule {
-
-  // Determines the URL pattern is matched by this rules. This pattern can be
-  // used with any of the {get|put|post|delete|patch} methods. A custom method
-  // can be defined using the 'custom' field.
-  oneof pattern {
-    // Used for listing and getting information about resources.
-    string get = 2;
-
-    // Used for updating a resource.
-    string put = 3;
-
-    // Used for creating a resource.
-    string post = 4;
-
-    // Used for deleting a resource.
-    string delete = 5;
-
-    // Used for updating a resource.
-    string patch = 6;
-
-    // Custom pattern is used for defining custom verbs.
-    CustomHttpPattern custom = 8;
-  }
-
-  // The name of the request field whose value is mapped to the HTTP body, or
-  // `*` for mapping all fields not captured by the path pattern to the HTTP
-  // body.
-  string body = 7;
-
-  // Additional HTTP bindings for the selector. Nested bindings must not
-  // specify a selector and must not contain additional bindings.
-  repeated HttpRule additional_bindings = 11;
-}
-
-// A custom pattern is used for defining custom HTTP verb.
-message CustomHttpPattern {
-  // The name of this custom HTTP verb.
-  string kind = 1;
-
-  // The path matched by this custom verb.
-  string path = 2;
-}
diff --git a/python/adapters/requirements.txt b/python/adapters/requirements.txt
deleted file mode 100755
index a0641b2..0000000
--- a/python/adapters/requirements.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-argparse==1.2.1
-arrow==0.10.0
-bitstring==3.1.5
-cmd2==0.7.0
-colorama==0.3.9
-cython==0.24.1
-decorator==4.1.2
-docker-py==1.10.6
-fluent-logger==0.6.0
-grpc==0.3.post19
-grpcio==1.3.5
-grpcio-tools==1.3.5
-hash_ring==1.3.1
-hexdump==3.3
-jinja2==2.8
-jsonpatch==1.16
-kafka_python==1.3.5
-klein==17.10.0
-kubernetes==5.0.0
-netaddr==0.7.19
-networkx==2.0
-nose==1.3.7
-nose-exclude==0.5.0
-nose-testconfig==0.10
-mock==2.0.0
-netifaces==0.10.6
-pcapy==0.11.1
-pep8==1.7.1
-pep8-naming>=0.3.3
-protobuf==3.3.0
-protobuf-to-dict==0.1.0
-pyflakes==1.6.0
-pylint==1.7.6
-#pypcap>=1.1.5
-pyOpenSSL==17.3.0
-PyYAML==3.12
-requests==2.18.4
-scapy==2.3.3
-service-identity==17.0.0
-simplejson==3.12.0
-jsonschema==2.6.0
-six==1.11.0
-structlog==17.2.0
-termcolor==1.1.0
-transitions==0.6.4
-treq==17.8.0
-Twisted==17.9.0
-txaioetcd==0.3.0
-urllib3==1.22
-pyang==1.7.3
-lxml==3.6.4
-nosexcover==1.0.11
-zmq==0.0.0
-pyzmq==16.0.3
-txZMQ==0.8.0
-ncclient==0.5.3
-xmltodict==0.11.0
-dicttoxml==1.7.4
-etcd3==0.7.0
-pyparsing==2.2.0
-packaging==17.1
-
-# python-consul>=0.6.1  we need the pre-released version for now, because 0.6.1 does not
-# yet support Twisted. Once this is released, it will be the 0.6.2 version
-git+https://github.com/cablehead/python-consul.git
-
-# Twisted Python kafka client
-git+https://github.com/ciena/afkak.git