This commit consists of the following:
1) The Kafka messaging proxy, in Twisted Python, for adapters
2) Initial implementation and containerization of the ponsim OLT adapter
and the ponsim ONU adapter
3) Initial submission of the request and response facade in both Twisted
Python and Go
4) Initial implementation of device management and logical device management
in the Core
5) Update to the log module to allow dynamically setting the log level per
package using the gRPC API
6) Bug fixes and minor changes

Change-Id: Ia8f033da84cfd08275335bae9542802415e7bb0f
diff --git a/.gitignore b/.gitignore
index 2b95835..bfd57da 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,6 +10,9 @@
 .vagrant
 *.box
 
+# venv
+.venv
+
 # Ansible
 ansible/*.retry
 
@@ -20,6 +23,8 @@
 **/*_pb2.py
 **/*_pb2_grpc.py
 **/*.pb.go
+**/*.pyc
+
 
 # Editors
 *.bak
diff --git a/Makefile b/Makefile
index 840e855..1761e50 100644
--- a/Makefile
+++ b/Makefile
@@ -78,7 +78,11 @@
 	@echo "    CLEAN $(basename $@)"
 	$(Q)$(MAKE) -C $(basename $@) clean
 
-build: containers
+build: protoc protos
+#build: protoc protos containers
+
+base:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-base:${TAG} -f adapters/docker/Dockerfile.base .
 
 containers: rw_core
 
@@ -90,5 +94,17 @@
 	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-rw-core:${TAG} -f docker/Dockerfile.rw_core_d .
 endif
 
+protoc:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-protoc:${TAG} -f adapters/docker/Dockerfile.protoc .
+
+protos:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} -f adapters/docker/Dockerfile.protos .
+
+ponsim_adapter_olt:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-ponsim-adapter-olt:${TAG} -f adapters/docker/Dockerfile.ponsim_adapter_olt .
+
+ponsim_adapter_onu:
+	docker build $(DOCKER_BUILD_ARGS) -t ${REGISTRY}${REPOSITORY}voltha-ponsim-adapter-onu:${TAG} -f adapters/docker/Dockerfile.ponsim_adapter_onu .
+
 
 # end file
diff --git a/adapters/__init__.py b/adapters/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/adapters/__init__.py
diff --git a/adapters/common/__init__.py b/adapters/common/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/common/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/common/event_bus.py b/adapters/common/event_bus.py
new file mode 100644
index 0000000..e717c16
--- /dev/null
+++ b/adapters/common/event_bus.py
@@ -0,0 +1,194 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A simple internal pub/sub event bus with topics and filter-based registration.
+"""
+import re
+
+import structlog
+
+
+log = structlog.get_logger()
+
+
+class _Subscription(object):
+
+    __slots__ = ('bus', 'predicate', 'callback', 'topic')
+
+    def __init__(self, bus, predicate, callback, topic=None):
+        self.bus = bus
+        self.predicate = predicate
+        self.callback = callback
+        self.topic = topic
+
+
+class EventBus(object):
+
+    def __init__(self):
+        self.subscriptions = {}  # topic -> list of _Subscription objects
+                                 # topic None holds regexp based topic subs.
+        self.subs_topic_map = {} # to aid fast lookup when unsubscribing
+
+    def list_subscribers(self, topic=None):
+        if topic is None:
+            return sum(self.subscriptions.itervalues(), [])
+        else:
+            if topic in self.subscriptions:
+                return self.subscriptions[topic]
+            else:
+                return []
+
+    @staticmethod
+    def _get_topic_key(topic):
+        if isinstance(topic, str):
+            return topic
+        elif hasattr(topic, 'match'):
+            return None
+        else:
+            raise AttributeError('topic not a string nor a compiled regex')
+
+    def subscribe(self, topic, callback, predicate=None):
+        """
+        Subscribe to given topic with predicate and register the callback
+        :param topic: String topic (explicit) or regexp based topic filter.
+        :param callback: Callback method with signature def func(topic, msg)
+        :param predicate: Optional method/function signature def predicate(msg)
+        :return: Subscription object which can be used to unsubscribe
+        """
+        subscription = _Subscription(self, predicate, callback, topic)
+        topic_key = self._get_topic_key(topic)
+        self.subscriptions.setdefault(topic_key, []).append(subscription)
+        self.subs_topic_map[subscription] = topic_key
+        return subscription
+
+    def unsubscribe(self, subscription):
+        """
+        Remove given subscription
+        :param subscription: subscription object as was returned by subscribe
+        :return: None
+        """
+        topic_key = self.subs_topic_map[subscription]
+        self.subscriptions[topic_key].remove(subscription)
+
+    def publish(self, topic, msg):
+        """
+        Publish given message to all subscribers registered with topic taking
+        the predicate functions into account.
+        :param topic: String topic
+        :param msg: Arbitrary python data as message
+        :return: None
+        """
+
+        def passes(msg, predicate):
+            try:
+                return predicate(msg)
+            except Exception:
+                return False  # failed predicate function treated as no match
+
+        # lookup subscribers with explicit topic subscriptions; copy into
+        # a new list so that extending it below does not mutate the stored
+        # subscription list
+        subscribers = list(self.subscriptions.get(topic, []))
+
+        # add matching regexp topic subscribers
+        subscribers.extend(s for s in self.subscriptions.get(None, [])
+                           if s.topic.match(topic))
+
+        for candidate in subscribers:
+            predicate = candidate.predicate
+            if predicate is None or passes(msg, predicate):
+                try:
+                    candidate.callback(topic, msg)
+                except Exception as e:
+                    log.exception('callback-failed', e=repr(e), topic=topic)
+
+
+default_bus = EventBus()
+
+
+class EventBusClient(object):
+    """
+    Primary interface to the EventBus. Usage:
+
+    Publish:
+    >>> events = EventBusClient()
+    >>> msg = dict(a=1, b='foo')
+    >>> events.publish('a.topic', msg)
+
+    Subscribe to get all messages on specific topic:
+    >>> def got_event(topic, msg):
+    >>>     print topic, ':', msg
+    >>> events = EventBusClient()
+    >>> events.subscribe('a.topic', got_event)
+
+    Subscribe to get messages matching predicate on specific topic:
+    >>> def got_event(topic, msg):
+    >>>     print topic, ':', msg
+    >>> events = EventBusClient()
+    >>> events.subscribe('a.topic', got_event, lambda msg: len(msg) < 100)
+
+    Use a DeferredQueue to buffer incoming messages:
+    >>> queue = DeferredQueue()
+    >>> events = EventBusClient()
+    >>> events.subscribe('a.topic', lambda _, msg: queue.put(msg))
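+
+    Subscribe using a regexp-based topic filter (subscribe also accepts a
+    compiled regular expression in place of a string topic):
+    >>> import re
+    >>> events = EventBusClient()
+    >>> events.subscribe(re.compile(r'a\..*'), got_event)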
+
+    """
+    def __init__(self, bus=None):
+        """
+        Obtain a client interface for the pub/sub event bus.
+        :param bus: An optional specific event bus. Intended mainly for
+        test use. If not provided, the process default bus will be used,
+        which is the preferred use (a process shall not need more than
+        one bus).
+        """
+        self.bus = bus or default_bus
+
+    def publish(self, topic, msg):
+        """
+        Publish given msg to given topic.
+        :param topic: String topic
+        :param msg: Arbitrary python data as message
+        :return: None
+        """
+        self.bus.publish(topic, msg)
+
+    def subscribe(self, topic, callback, predicate=None):
+        """
+        Subscribe to given topic with predicate and register the callback
+        :param topic: String topic (explicit) or regexp based topic filter.
+        :param callback: Callback method with signature def func(topic, msg)
+        :param predicate: Optional method/function with signature
+        def predicate(msg)
+        :return: Subscription object which can be used to unsubscribe
+        """
+        return self.bus.subscribe(topic, callback, predicate)
+
+    def unsubscribe(self, subscription):
+        """
+        Remove given subscription
+        :param subscription: subscription object as was returned by subscribe
+        :return: None
+        """
+        return self.bus.unsubscribe(subscription)
+
+    def list_subscribers(self, topic=None):
+        """
+        Return list of subscribers. If topic is provided, the list is
+        filtered to those subscribed to that topic.
+        :param topic: Optional topic
+        :return: List of subscriptions
+        """
+        return self.bus.list_subscribers(topic)
diff --git a/adapters/common/frameio/__init__.py b/adapters/common/frameio/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/common/frameio/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/common/frameio/frameio.py b/adapters/common/frameio/frameio.py
new file mode 100644
index 0000000..2f68ef8
--- /dev/null
+++ b/adapters/common/frameio/frameio.py
@@ -0,0 +1,437 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A module that can send and receive raw Ethernet frames on a set of
+interfaces, and can manage a set of vlan interfaces on top of existing
+interfaces. Due to its reliance on raw sockets, this module requires
+root access. Also, since raw sockets are hard to deal with in Twisted
+(not directly supported), we need to run the receiver select loop on a
+dedicated thread.
+"""
+
+import fcntl
+import os
+import select
+import socket
+import struct
+import sys
+import uuid
+from threading import Thread, Condition
+
+import structlog
+from pcapy import BPFProgram
+
+from scapy.data import ETH_P_ALL
+from twisted.internet import reactor
+from zope.interface import implementer
+
+from adapters.common.utils.registry import IComponent
+
+if sys.platform.startswith('linux'):
+    from adapters.common.frameio.third_party.oftest import afpacket, netutils
+elif sys.platform == 'darwin':
+    from scapy.arch import pcapdnet, BIOCIMMEDIATE, dnet
+
+log = structlog.get_logger()
+
+
+def hexify(buffer):
+    """
+    Return a hexadecimal string encoding of input buffer
+    """
+    return ''.join('%02x' % ord(c) for c in buffer)
+
+
+class _SelectWakerDescriptor(object):
+    """
+    A descriptor that can be mixed into a select loop to wake it up.
+    """
+    def __init__(self):
+        self.pipe_read, self.pipe_write = os.pipe()
+        fcntl.fcntl(self.pipe_write, fcntl.F_SETFL, os.O_NONBLOCK)
+
+    def __del__(self):
+        os.close(self.pipe_read)
+        os.close(self.pipe_write)
+
+    def fileno(self):
+        return self.pipe_read
+
+    def wait(self):
+        os.read(self.pipe_read, 1)
+
+    def notify(self):
+        """Trigger a select loop"""
+        os.write(self.pipe_write, '\x00')
+
+
+class BpfProgramFilter(object):
+    """
+    Convenience packet filter based on the well-tried Berkeley Packet Filter,
+    used by many well known open source tools such as pcap and tcpdump.
+    """
+    def __init__(self, program_string):
+        """
+        Create a filter using the BPF command syntax. To learn more,
+        consult 'man pcap-filter'.
+        :param program_string: The textual definition of the filter. Examples:
+        'vlan 1000'
+        'vlan 1000 and ip src host 10.10.10.10'
+        """
+        self.bpf = BPFProgram(program_string)
+
+    def __call__(self, frame):
+        """
+        Return 1 if frame passes filter.
+        :param frame: Raw frame provided as Python string
+        :return: 1 if frame satisfies filter, 0 otherwise.
+        """
+        return self.bpf.filter(frame)
+
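+# Usage sketch (the filter expression and handle() are illustrative):
+#     is_vlan_1000 = BpfProgramFilter('vlan 1000')
+#     if is_vlan_1000(frame):
+#         handle(frame)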
+
+class FrameIOPort(object):
+    """
+    Represents a network interface on which we can send and receive raw
+    Ethernet frames.
+    """
+
+    RCV_SIZE_DEFAULT = 4096
+    ETH_P_ALL = 0x03
+    RCV_TIMEOUT = 10000
+    MIN_PKT_SIZE = 60
+
+    def __init__(self, iface_name):
+        self.iface_name = iface_name
+        self.proxies = []
+        self.socket = self.open_socket(self.iface_name)
+        log.debug('socket-opened', fn=self.fileno(), iface=iface_name)
+        self.received = 0
+        self.discarded = 0
+
+    def add_proxy(self, proxy):
+        self.proxies.append(proxy)
+
+    def del_proxy(self, proxy):
+        self.proxies = [p for p in self.proxies if p.name != proxy.name]
+
+    def open_socket(self, iface_name):
+        raise NotImplementedError('to be implemented by derived class')
+
+    def rcv_frame(self):
+        raise NotImplementedError('to be implemented by derived class')
+
+    def __del__(self):
+        if self.socket:
+            self.socket.close()
+            self.socket = None
+        log.debug('socket-closed', iface=self.iface_name)
+
+    def fileno(self):
+        return self.socket.fileno()
+
+    def _dispatch(self, proxy, frame):
+        log.debug('calling-publisher', proxy=proxy.name, frame=hexify(frame))
+        try:
+            proxy.callback(proxy, frame)
+        except Exception as e:
+            log.exception('callback-error',
+                          explanation='Callback failed while processing frame',
+                          e=e)
+
+    def recv(self):
+        """Called on the select thread when a packet arrives"""
+        try:
+            frame = self.rcv_frame()
+        except RuntimeError as e:
+            # we observed this happening sometimes right after the socket
+            # was attached to a newly created veth interface, so we log it
+            # but allow processing to continue
+            log.warn('afpacket-recv-error', code=-1, e=repr(e))
+            return
+
+        log.debug('frame-received', iface=self.iface_name, len=len(frame),
+                  hex=hexify(frame))
+        self.received += 1
+        dispatched = False
+        for proxy in self.proxies:
+            if proxy.filter is None or proxy.filter(frame):
+                log.debug('frame-dispatched')
+                dispatched = True
+                reactor.callFromThread(self._dispatch, proxy, frame)
+
+        if not dispatched:
+            self.discarded += 1
+            log.debug('frame-discarded')
+
+    def send(self, frame):
+        log.debug('sending', len=len(frame), iface=self.iface_name)
+        sent_bytes = self.send_frame(frame)
+        if sent_bytes != len(frame):
+            log.error('send-error', iface=self.iface_name,
+                      wanted_to_send=len(frame), actually_sent=sent_bytes)
+        return sent_bytes
+
+    def send_frame(self, frame):
+        try:
+            return self.socket.send(frame)
+        except socket.error as err:
+            if err[0] == os.errno.EINVAL and len(frame) < self.MIN_PKT_SIZE:
+                # pad short frames up to the minimum Ethernet frame size
+                padding = '\x00' * (self.MIN_PKT_SIZE - len(frame))
+                return self.socket.send(frame + padding)
+            raise
+
+    def up(self):
+        if sys.platform.startswith('darwin'):
+            pass
+        else:
+            os.system('ip link set {} up'.format(self.iface_name))
+        return self
+
+    def down(self):
+        if sys.platform.startswith('darwin'):
+            pass
+        else:
+            os.system('ip link set {} down'.format(self.iface_name))
+        return self
+
+    def statistics(self):
+        return self.received, self.discarded
+
+
+class LinuxFrameIOPort(FrameIOPort):
+
+    def open_socket(self, iface_name):
+        s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, 0)
+        afpacket.enable_auxdata(s)
+        s.bind((self.iface_name, self.ETH_P_ALL))
+        netutils.set_promisc(s, iface_name)
+        s.settimeout(self.RCV_TIMEOUT)
+        return s
+
+    def rcv_frame(self):
+        return afpacket.recv(self.socket, self.RCV_SIZE_DEFAULT)
+
+
+class DarwinFrameIOPort(FrameIOPort):
+
+    def open_socket(self, iface_name):
+        sin = pcapdnet.open_pcap(iface_name, 1600, 1, 100)
+        try:
+            fcntl.ioctl(sin.fileno(), BIOCIMMEDIATE, struct.pack("I", 1))
+        except Exception:
+            pass
+
+        # need a different kind of socket for sending out
+        self.sout = dnet.eth(iface_name)
+
+        return sin
+
+    def send_frame(self, frame):
+        return self.sout.send(frame)
+
+    def rcv_frame(self):
+        pkt = self.socket.next()
+        if pkt is not None:
+            ts, pkt = pkt
+        return pkt
+
+
+if sys.platform == 'darwin':
+    _FrameIOPort = DarwinFrameIOPort
+elif sys.platform.startswith('linux'):
+    _FrameIOPort = LinuxFrameIOPort
+else:
+    raise Exception('Unsupported platform {}'.format(sys.platform))
+
+
+class FrameIOPortProxy(object):
+    """Makes FrameIOPort sharable between multiple users"""
+
+    def __init__(self, frame_io_port, callback, filter=None, name=None):
+        self.frame_io_port = frame_io_port
+        self.callback = callback
+        self.filter = filter
+        self.name = uuid.uuid4().hex[:12] if name is None else name
+
+    @property
+    def iface_name(self):
+        return self.frame_io_port.iface_name
+
+    def get_iface_name(self):
+        return self.frame_io_port.iface_name
+
+    def send(self, frame):
+        return self.frame_io_port.send(frame)
+
+    def up(self):
+        self.frame_io_port.up()
+        return self
+
+    def down(self):
+        self.frame_io_port.down()
+        return self
+
+
+@implementer(IComponent)
+class FrameIOManager(Thread):
+    """
+    Packet/Frame IO manager that can be used to send/receive raw frames
+    on a set of network interfaces.
+    """
+    def __init__(self):
+        super(FrameIOManager, self).__init__()
+
+        self.ports = {}  # iface_name -> FrameIOPort
+        self.queue = {}  # iface_name -> TODO
+
+        self.cvar = Condition()
+        self.waker = _SelectWakerDescriptor()
+        self.stopped = False
+        self.ports_changed = False
+
+    # ~~~~~~~~~~~ exposed methods callable from main thread ~~~~~~~~~~~~~~~~~~~
+
+    def start(self):
+        """
+        Start the IO manager and its select loop thread
+        """
+        log.debug('starting')
+        super(FrameIOManager, self).start()
+        log.info('started')
+        return self
+
+    def stop(self):
+        """
+        Stop the IO manager and its thread with the select loop
+        """
+        log.debug('stopping')
+        self.stopped = True
+        self.waker.notify()
+        self.join()
+        del self.ports
+        log.info('stopped')
+
+    def list_interfaces(self):
+        """
+        Return the interfaces listened on
+        :return: dict of iface_name -> FrameIOPort objects
+        """
+        return self.ports
+
+    def open_port(self, iface_name, callback, filter=None, name=None):
+        """
+        Add a new interface and start receiving on it.
+        :param iface_name: Name of the interface. Must be an existing Unix
+        interface (eth0, en0, etc.)
+        :param callback: Called on each received frame;
+        signature: def callback(port, frame) where port is the FrameIOPort
+        instance at which the frame was received and frame is the actual
+        frame received (as a binary string)
+        :param filter: An optional filter (predicate), with signature:
+        def filter(frame). If provided, only frames for which filter evaluates
+        to True will be forwarded to callback.
+        :return: FrameIOPortProxy instance.
+        """
+
+        port = self.ports.get(iface_name)
+        if port is None:
+            port = _FrameIOPort(iface_name)
+            self.ports[iface_name] = port
+            self.ports_changed = True
+            self.waker.notify()
+
+        proxy = FrameIOPortProxy(port, callback, filter, name)
+        port.add_proxy(proxy)
+
+        return proxy
+
+    def close_port(self, proxy):
+        """
+        Remove the proxy. If this is the last proxy on an interface, stop and
+        remove the named interface as well
+        :param proxy: FrameIOPortProxy reference
+        :return: None
+        """
+        assert isinstance(proxy, FrameIOPortProxy)
+        iface_name = proxy.get_iface_name()
+        assert iface_name in self.ports, "iface_name {} unknown".format(iface_name)
+        port = self.ports[iface_name]
+        port.del_proxy(proxy)
+
+        if not port.proxies:
+            del self.ports[iface_name]
+            # need to exit select loop to reconstruct select fd lists
+            self.ports_changed = True
+            self.waker.notify()
+
+    def send(self, iface_name, frame):
+        """
+        Send frame on given interface
+        :param iface_name: Name of previously registered interface
+        :param frame: frame as string
+        :return: number of bytes sent
+        """
+        return self.ports[iface_name].send(frame)
+
+    # ~~~~~~~~~~~~~ Thread methods (running on non-main thread) ~~~~~~~~~~~~~~~
+
+    def run(self):
+        """
+        Called on the alien thread, this is the core multi-port receive loop
+        """
+
+        log.debug('select-loop-started')
+
+        # outer loop constructs sockets list for select
+        while not self.stopped:
+            sockets = [self.waker] + self.ports.values()
+            self.ports_changed = False
+            empty = []
+            # inner select loop
+
+            while not self.stopped:
+                try:
+                    _in, _out, _err = select.select(sockets, empty, empty, 1)
+                except Exception as e:
+                    log.exception('frame-io-select-error', e=e)
+                    break
+                with self.cvar:
+                    for port in _in:
+                        if port is self.waker:
+                            self.waker.wait()
+                            continue
+                        else:
+                            port.recv()
+                    self.cvar.notify_all()
+                if self.ports_changed:
+                    break  # break inner loop so we reconstruct sockets list
+
+        log.debug('select-loop-exited')
+
+    def del_interface(self, iface_name):
+        """
+        Delete the given interface (used when stopping)
+        """
+        log.info('deleting-interface', iface=iface_name)
+        del self.ports[iface_name]
+        log.info('interface-deleted', iface=iface_name)
diff --git a/adapters/common/frameio/third_party/__init__.py b/adapters/common/frameio/third_party/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/common/frameio/third_party/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/common/frameio/third_party/oftest/LICENSE b/adapters/common/frameio/third_party/oftest/LICENSE
new file mode 100644
index 0000000..3216042
--- /dev/null
+++ b/adapters/common/frameio/third_party/oftest/LICENSE
@@ -0,0 +1,36 @@
+OpenFlow Test Framework
+
+Copyright (c) 2010 The Board of Trustees of The Leland Stanford
+Junior University
+
+Except where otherwise noted, this software is distributed under
+the OpenFlow Software License.  See
+http://www.openflowswitch.org/wp/legal/ for current details.
+
+We are making the OpenFlow specification and associated documentation
+(Software) available for public use and benefit with the expectation
+that others will use, modify and enhance the Software and contribute
+those enhancements back to the community. However, since we would like
+to make the Software available for broadest use, with as few
+restrictions as possible permission is hereby granted, free of charge,
+to any person obtaining a copy of this Software to deal in the
+Software under the copyrights without restriction, including without
+limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+The name and trademarks of copyright holder(s) may NOT be used in
+advertising or publicity pertaining to the Software or any derivatives
+without specific, written prior permission.
diff --git a/adapters/common/frameio/third_party/oftest/README.md b/adapters/common/frameio/third_party/oftest/README.md
new file mode 100644
index 0000000..f0cb649
--- /dev/null
+++ b/adapters/common/frameio/third_party/oftest/README.md
@@ -0,0 +1,6 @@
+Files in this directory are derived from the respective files
+in oftest (http://github.com/floodlight/oftest).
+
+For the licensing terms of these files, see LICENSE in this dir.
+
+
diff --git a/adapters/common/frameio/third_party/oftest/__init__.py b/adapters/common/frameio/third_party/oftest/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/common/frameio/third_party/oftest/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/common/frameio/third_party/oftest/afpacket.py b/adapters/common/frameio/third_party/oftest/afpacket.py
new file mode 100644
index 0000000..9ae8075
--- /dev/null
+++ b/adapters/common/frameio/third_party/oftest/afpacket.py
@@ -0,0 +1,124 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+AF_PACKET receive support
+
+When VLAN offload is enabled on the NIC Linux will not deliver the VLAN tag
+in the data returned by recv. Instead, it delivers the VLAN TCI in a control
+message. Python 2.x doesn't have built-in support for recvmsg, so we have to
+use ctypes to call it. The recv function exported by this module reconstructs
+the VLAN tag if it was offloaded.
+"""
+
+import struct
+from ctypes import *
+
+ETH_P_8021Q = 0x8100
+SOL_PACKET = 263
+PACKET_AUXDATA = 8
+TP_STATUS_VLAN_VALID = 1 << 4
+
+class struct_iovec(Structure):
+    _fields_ = [
+        ("iov_base", c_void_p),
+        ("iov_len", c_size_t),
+    ]
+
+class struct_msghdr(Structure):
+    _fields_ = [
+        ("msg_name", c_void_p),
+        ("msg_namelen", c_uint32),
+        ("msg_iov", POINTER(struct_iovec)),
+        ("msg_iovlen", c_size_t),
+        ("msg_control", c_void_p),
+        ("msg_controllen", c_size_t),
+        ("msg_flags", c_int),
+    ]
+
+class struct_cmsghdr(Structure):
+    _fields_ = [
+        ("cmsg_len", c_size_t),
+        ("cmsg_level", c_int),
+        ("cmsg_type", c_int),
+    ]
+
+class struct_tpacket_auxdata(Structure):
+    _fields_ = [
+        ("tp_status", c_uint),
+        ("tp_len", c_uint),
+        ("tp_snaplen", c_uint),
+        ("tp_mac", c_ushort),
+        ("tp_net", c_ushort),
+        ("tp_vlan_tci", c_ushort),
+        ("tp_padding", c_ushort),
+    ]
+
+libc = CDLL("libc.so.6")
+recvmsg = libc.recvmsg
+recvmsg.argtypes = [c_int, POINTER(struct_msghdr), c_int]
+recvmsg.restype = c_int
+
+def enable_auxdata(sk):
+    """
+    Ask the kernel to return the VLAN tag in a control message
+
+    Must be called on the socket before afpacket.recv.
+    """
+    sk.setsockopt(SOL_PACKET, PACKET_AUXDATA, 1)
+
+def recv(sk, bufsize):
+    """
+    Receive a packet from an AF_PACKET socket
+    @sk Socket
+    @bufsize Maximum packet size
+    """
+    buf = create_string_buffer(bufsize)
+
+    ctrl_bufsize = sizeof(struct_cmsghdr) + sizeof(struct_tpacket_auxdata) + sizeof(c_size_t)
+    ctrl_buf = create_string_buffer(ctrl_bufsize)
+
+    iov = struct_iovec()
+    iov.iov_base = cast(buf, c_void_p)
+    iov.iov_len = bufsize
+
+    msghdr = struct_msghdr()
+    msghdr.msg_name = None
+    msghdr.msg_namelen = 0
+    msghdr.msg_iov = pointer(iov)
+    msghdr.msg_iovlen = 1
+    msghdr.msg_control = cast(ctrl_buf, c_void_p)
+    msghdr.msg_controllen = ctrl_bufsize
+    msghdr.msg_flags = 0
+
+    rv = recvmsg(sk.fileno(), byref(msghdr), 0)
+    if rv < 0:
+        raise RuntimeError("recvmsg failed: rv=%d" % rv)
+
+    # The kernel only delivers control messages we ask for. We
+    # only enabled PACKET_AUXDATA, so we can assume it's the
+    # only control message.
+    assert msghdr.msg_controllen >= sizeof(struct_cmsghdr)
+
+    cmsghdr = struct_cmsghdr.from_buffer(ctrl_buf) # pylint: disable=E1101
+    assert cmsghdr.cmsg_level == SOL_PACKET
+    assert cmsghdr.cmsg_type == PACKET_AUXDATA
+
+    auxdata = struct_tpacket_auxdata.from_buffer(ctrl_buf, sizeof(struct_cmsghdr)) # pylint: disable=E1101
+
+    if auxdata.tp_vlan_tci != 0 or auxdata.tp_status & TP_STATUS_VLAN_VALID:
+        # Insert VLAN tag
+        tag = struct.pack("!HH", ETH_P_8021Q, auxdata.tp_vlan_tci)
+        return buf.raw[:12] + tag + buf.raw[12:rv]
+    else:
+        return buf.raw[:rv]
diff --git a/adapters/common/frameio/third_party/oftest/netutils.py b/adapters/common/frameio/third_party/oftest/netutils.py
new file mode 100644
index 0000000..092d490
--- /dev/null
+++ b/adapters/common/frameio/third_party/oftest/netutils.py
@@ -0,0 +1,73 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Network utilities for the OpenFlow test framework
+"""
+
+#############################################################################
+##                                                                         ##
+## Promiscuous mode enable/disable                                         ##
+##                                                                         ##
+## Based on code from Scapy by Philippe Biondi                             ##
+##                                                                         ##
+##                                                                         ##
+## This program is free software; you can redistribute it and/or modify it ##
+## under the terms of the GNU General Public License as published by the   ##
+## Free Software Foundation; either version 2, or (at your option) any     ##
+## later version.                                                          ##
+##                                                                         ##
+## This program is distributed in the hope that it will be useful, but     ##
+## WITHOUT ANY WARRANTY; without even the implied warranty of              ##
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU       ##
+## General Public License for more details.                                ##
+##                                                                         ##
+#############################################################################
+
+import socket
+from fcntl import ioctl
+import struct
+
+# From net/if_arp.h
+ARPHDR_ETHER = 1
+ARPHDR_LOOPBACK = 772
+
+# From bits/ioctls.h
+SIOCGIFHWADDR  = 0x8927          # Get hardware address
+SIOCGIFINDEX   = 0x8933          # name -> if_index mapping
+
+# From netpacket/packet.h
+PACKET_ADD_MEMBERSHIP  = 1
+PACKET_DROP_MEMBERSHIP = 2
+PACKET_MR_PROMISC      = 1
+
+# From bits/socket.h
+SOL_PACKET = 263
+
+def get_if(iff,cmd):
+  s=socket.socket()
+  ifreq = ioctl(s, cmd, struct.pack("16s16x",iff))
+  s.close()
+  return ifreq
+
+def get_if_index(iff):
+  return int(struct.unpack("I",get_if(iff, SIOCGIFINDEX)[16:20])[0])
+
+def set_promisc(s,iff,val=1):
+  mreq = struct.pack("IHH8s", get_if_index(iff), PACKET_MR_PROMISC, 0, "")
+  if val:
+      cmd = PACKET_ADD_MEMBERSHIP
+  else:
+      cmd = PACKET_DROP_MEMBERSHIP
+  s.setsockopt(SOL_PACKET, cmd, mreq)
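+
+# Usage sketch ('eth0' is illustrative; s must be an AF_PACKET socket):
+#   set_promisc(s, 'eth0')     # enable promiscuous mode
+#   set_promisc(s, 'eth0', 0)  # disable it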
+
diff --git a/adapters/common/manhole.py b/adapters/common/manhole.py
new file mode 100644
index 0000000..c00c900
--- /dev/null
+++ b/adapters/common/manhole.py
@@ -0,0 +1,129 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import rlcompleter
+from pprint import pprint
+
+import structlog
+from twisted.conch import manhole_ssh
+from twisted.conch.manhole import ColoredManhole
+from twisted.conch.ssh import keys
+from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
+from twisted.cred.portal import Portal
+from twisted.internet import reactor
+
+log = structlog.get_logger()
+
+
+MANHOLE_SERVER_RSA_PRIVATE = './manhole_rsa_key'
+MANHOLE_SERVER_RSA_PUBLIC = './manhole_rsa_key.pub'
+
+
+def get_rsa_keys():
+    if not (os.path.exists(MANHOLE_SERVER_RSA_PUBLIC) and \
+                    os.path.exists(MANHOLE_SERVER_RSA_PRIVATE)):
+        # generate an RSA keypair
+        log.info('generate-rsa-keypair')
+        from Crypto.PublicKey import RSA
+        rsa_key = RSA.generate(1024)
+        public_key_str = rsa_key.publickey().exportKey(format='OpenSSH')
+        private_key_str = rsa_key.exportKey()
+
+        # save keys for next time
+        file(MANHOLE_SERVER_RSA_PUBLIC, 'w+b').write(public_key_str)
+        file(MANHOLE_SERVER_RSA_PRIVATE, 'w+b').write(private_key_str)
+        log.debug('saved-rsa-keypair', public=MANHOLE_SERVER_RSA_PUBLIC,
+                  private=MANHOLE_SERVER_RSA_PRIVATE)
+    else:
+        public_key_str = file(MANHOLE_SERVER_RSA_PUBLIC).read()
+        private_key_str = file(MANHOLE_SERVER_RSA_PRIVATE).read()
+    return public_key_str, private_key_str
+
+
+class ManholeWithCompleter(ColoredManhole):
+
+    def __init__(self, namespace):
+        namespace['manhole'] = self
+        super(ManholeWithCompleter, self).__init__(namespace)
+        self.last_tab = None
+        self.completer = rlcompleter.Completer(self.namespace)
+
+    def handle_TAB(self):
+        if self.last_tab != self.lineBuffer:
+            self.last_tab = self.lineBuffer
+            return
+
+        buffer = ''.join(self.lineBuffer)
+        completions = []
+        maxlen = 3
+        for c in xrange(1000):
+            candidate = self.completer.complete(buffer, c)
+            if not candidate:
+                break
+
+            if len(candidate) > maxlen:
+                maxlen = len(candidate)
+
+            completions.append(candidate)
+
+        if len(completions) == 1:
+            rest = completions[0][len(buffer):]
+            self.terminal.write(rest)
+            self.lineBufferIndex += len(rest)
+            self.lineBuffer.extend(rest)
+
+        elif len(completions):
+            maxlen += 3
+            numcols = self.width / maxlen
+            self.terminal.nextLine()
+            for idx, candidate in enumerate(completions):
+                self.terminal.write('%%-%ss' % maxlen % candidate)
+                if not ((idx + 1) % numcols):
+                    self.terminal.nextLine()
+            self.terminal.nextLine()
+            self.drawInputLine()
+
+
+class Manhole(object):
+
+    def __init__(self, port, pws, **kw):
+        kw.update(globals())
+        kw['pp'] = pprint
+
+        realm = manhole_ssh.TerminalRealm()
+        manhole = ManholeWithCompleter(kw)
+
+        def windowChanged(_, win_size):
+            manhole.terminalSize(*reversed(win_size[:2]))
+
+        realm.sessionFactory.windowChanged = windowChanged
+        realm.chainedProtocolFactory.protocolFactory = lambda _: manhole
+        portal = Portal(realm)
+        portal.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(**pws))
+        factory = manhole_ssh.ConchFactory(portal)
+        public_key_str, private_key_str = get_rsa_keys()
+        factory.publicKeys = {
+            'ssh-rsa': keys.Key.fromString(public_key_str)
+        }
+        factory.privateKeys = {
+            'ssh-rsa': keys.Key.fromString(private_key_str)
+        }
+        reactor.listenTCP(port, factory, interface='localhost')
+
+
+if __name__ == '__main__':
+    Manhole(12222, dict(admin='admin'))
+    reactor.run()
diff --git a/adapters/common/openflow/__init__.py b/adapters/common/openflow/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/common/openflow/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/common/openflow/utils.py b/adapters/common/openflow/utils.py
new file mode 100644
index 0000000..b4c66cb
--- /dev/null
+++ b/adapters/common/openflow/utils.py
@@ -0,0 +1,45 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from adapters.protos import openflow_13_pb2 as ofp
+
+OUTPUT = ofp.OFPAT_OUTPUT
+ETH_TYPE = ofp.OFPXMT_OFB_ETH_TYPE
+IP_PROTO = ofp.OFPXMT_OFB_IP_PROTO
+
+def get_ofb_fields(flow):
+    assert isinstance(flow, ofp.ofp_flow_stats)
+    assert flow.match.type == ofp.OFPMT_OXM
+    ofb_fields = []
+    for field in flow.match.oxm_fields:
+        assert field.oxm_class == ofp.OFPXMC_OPENFLOW_BASIC
+        ofb_fields.append(field.ofb_field)
+    return ofb_fields
+
+def get_actions(flow):
+    """Extract list of ofp_action objects from flow spec object"""
+    assert isinstance(flow, ofp.ofp_flow_stats)
+    # for now we make the hard assumption that the actions are given
+    # within an OFPIT_APPLY_ACTIONS instruction
+    for instruction in flow.instructions:
+        if instruction.type == ofp.OFPIT_APPLY_ACTIONS:
+            return instruction.actions.actions
+
+def get_out_port(flow):
+    for action in get_actions(flow):
+        if action.type == OUTPUT:
+            return action.output.port
+    return None
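+
+# Usage sketch, given an ofp_flow_stats message `flow`:
+#     out_port = get_out_port(flow)
+#     ip_protos = [f.ip_proto for f in get_ofb_fields(flow)
+#                  if f.type == IP_PROTO]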
diff --git a/adapters/common/structlog_setup.py b/adapters/common/structlog_setup.py
new file mode 100644
index 0000000..3401977
--- /dev/null
+++ b/adapters/common/structlog_setup.py
@@ -0,0 +1,134 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Setting up proper logging for Voltha"""
+
+import logging
+import logging.config
+from collections import OrderedDict
+
+import structlog
+from structlog.stdlib import BoundLogger, INFO
+
+try:
+    from thread import get_ident as _get_ident
+except ImportError:
+    from dummy_thread import get_ident as _get_ident
+
+
+class StructuredLogRenderer(object):
+    def __call__(self, logger, name, event_dict):
+        # in order to keep structured log data in event_dict to be forwarded as
+        # is, we need to pass it into the logger framework as the first
+        # positional argument.
+        args = (event_dict,)
+        kwargs = {}
+        return args, kwargs
+
+
+class PlainRenderedOrderedDict(OrderedDict):
+    """Our special version of OrderedDict that renders into string as a dict,
+       to make the log stream output cleaner.
+    """
+    def __repr__(self, _repr_running={}):
+        'od.__repr__() <==> repr(od)'
+        call_key = id(self), _get_ident()
+        if call_key in _repr_running:
+            return '...'
+        _repr_running[call_key] = 1
+        try:
+            if not self:
+                return '{}'
+            return '{%s}' % ", ".join("%s: %s" % (k, v)
+                                      for k, v in self.items())
+        finally:
+            del _repr_running[call_key]
+
+
+def setup_logging(log_config, instance_id, verbosity_adjust=0):
+    """
+    Set up logging such that:
+    - The primary logging entry method is structlog
+      (see http://structlog.readthedocs.io/en/stable/index.html)
+    - By default, the logging backend is Python standard lib logger
+    """
+
+    def add_exc_info_flag_for_exception(_, name, event_dict):
+        if name == 'exception':
+            event_dict['exc_info'] = True
+        return event_dict
+
+    def add_instance_id(_, __, event_dict):
+        event_dict['instance_id'] = instance_id
+        return event_dict
+
+    # Configure standard logging
+    logging.config.dictConfig(log_config)
+    logging.root.level -= 10 * verbosity_adjust
+
+    processors = [
+        add_exc_info_flag_for_exception,
+        structlog.processors.StackInfoRenderer(),
+        structlog.processors.format_exc_info,
+        add_instance_id,
+        StructuredLogRenderer(),
+    ]
+    structlog.configure(logger_factory=structlog.stdlib.LoggerFactory(),
+                        context_class=PlainRenderedOrderedDict,
+                        wrapper_class=BoundLogger,
+                        processors=processors)
+
+    # Mark first line of log
+    log = structlog.get_logger()
+    log.info("first-line")
+    return log
+
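+# Usage sketch (LOG_CONFIG is a standard logging.config.dictConfig
+# dictionary, assumed to be supplied by the caller):
+#     log = setup_logging(LOG_CONFIG, instance_id='1')
+#     log.info('component-ready', port=50060)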
+
+def update_logging(instance_id, vcore_id):
+    """
+    Add the vcore id to the structured logger
+    :param instance_id: The assigned instance id
+    :param vcore_id: The assigned vcore id
+    :return: structured logger
+    """
+    def add_exc_info_flag_for_exception(_, name, event_dict):
+        if name == 'exception':
+            event_dict['exc_info'] = True
+        return event_dict
+
+    def add_instance_id(_, __, event_dict):
+        if instance_id is not None:
+            event_dict['instance_id'] = instance_id
+        return event_dict
+
+    def add_vcore_id(_, __, event_dict):
+        if vcore_id is not None:
+            event_dict['vcore_id'] = vcore_id
+        return event_dict
+
+    processors = [
+        add_exc_info_flag_for_exception,
+        structlog.processors.StackInfoRenderer(),
+        structlog.processors.format_exc_info,
+        add_instance_id,
+        add_vcore_id,
+        StructuredLogRenderer(),
+    ]
+    structlog.configure(processors=processors)
+
+    # Mark first line of log
+    log = structlog.get_logger()
+    log.info("updated-logger")
+    return log
diff --git a/adapters/common/utils/__init__.py b/adapters/common/utils/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/common/utils/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/common/utils/asleep.py b/adapters/common/utils/asleep.py
new file mode 100644
index 0000000..10d1ce3
--- /dev/null
+++ b/adapters/common/utils/asleep.py
@@ -0,0 +1,31 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+""" Async sleep (asleep) method and other twisted goodies """
+
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+
+
+def asleep(dt):
+    """
+    Async (event driven) wait for given time period (in seconds)
+    :param dt: Delay in seconds
+    :return: Deferred to be fired with value None when time expires.
+    """
+    d = Deferred()
+    reactor.callLater(dt, lambda: d.callback(None))
+    return d
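+
+# Usage sketch from within a Twisted inlineCallbacks generator:
+#     from twisted.internet.defer import inlineCallbacks
+#
+#     @inlineCallbacks
+#     def poll():
+#         while True:
+#             yield asleep(1.0)  # non-blocking one-second pause
+#             # ... do periodic work here ...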
diff --git a/adapters/common/utils/consulhelpers.py b/adapters/common/utils/consulhelpers.py
new file mode 100644
index 0000000..6060ba3
--- /dev/null
+++ b/adapters/common/utils/consulhelpers.py
@@ -0,0 +1,178 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Some consul related convenience functions
+"""
+
+from structlog import get_logger
+from consul import Consul
+from random import randint
+from adapters.common.utils.nethelpers import get_my_primary_local_ipv4
+
+log = get_logger()
+
+
+def connect_to_consult(consul_endpoint):
+    log.debug('getting-service-endpoint', consul=consul_endpoint)
+
+    host = consul_endpoint.split(':')[0].strip()
+    port = int(consul_endpoint.split(':')[1].strip())
+
+    return Consul(host=host, port=port)
+
+
+def verify_all_services_healthy(consul_endpoint, service_name=None,
+                                number_of_expected_services=None):
+    """
+    Verify in consul if any service is healthy
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of service to check, optional
+    :param number_of_expected_services number of services to check for, optional
+    :return: true if healthy, false otherwise
+    """
+
+    def check_health(service):
+        _, serv_health = consul.health.service(service, passing=True)
+        return not serv_health == []
+
+    consul = connect_to_consult(consul_endpoint)
+
+    if service_name is not None:
+        return check_health(service_name)
+
+    services = get_all_services(consul_endpoint)
+
+    items = services.keys()
+
+    if number_of_expected_services is not None and \
+                    len(items) != number_of_expected_services:
+        return False
+
+    for item in items:
+        if not check_health(item):
+            return False
+
+    return True
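+
+# Usage sketch (the endpoint is illustrative):
+#     ok = verify_all_services_healthy('localhost:8500',
+#                                      number_of_expected_services=3)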
+
+
+def get_all_services(consul_endpoint):
+    log.debug('getting-service-verify-health')
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.services()
+
+    return services
+
+
+def get_all_instances_of_service(consul_endpoint, service_name):
+    log.debug('getting-all-instances-of-service', service=service_name)
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.service(service_name)
+
+    for service in services:
+        log.debug('service',
+                  name=service['ServiceName'],
+                  serviceid=service['ServiceID'],
+                  serviceport=service['ServicePort'],
+                  createindex=service['CreateIndex'])
+
+    return services
+
+
+def get_endpoint_from_consul(consul_endpoint, service_name):
+    """
+    Get endpoint of service_name from consul.
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of service for which endpoint
+                         needs to be found.
+    :return: service endpoint if available, else exit.
+    """
+    log.debug('getting-service-info', service=service_name)
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.service(service_name)
+
+    if len(services) == 0:
+        raise Exception(
+            'Cannot find service {} in consul'.format(service_name))
+
+    """ Get host IPV4 address
+    """
+    local_ipv4 = get_my_primary_local_ipv4()
+    """ If host IP address from where the request came in matches
+        the IP address of the requested service's host IP address,
+        pick the endpoint
+    """
+    for service in services:
+        if service['ServiceAddress'] == local_ipv4:
+            log.debug('picking-local-address')
+            endpoint = '{}:{}'.format(service['ServiceAddress'],
+                                      service['ServicePort'])
+            return endpoint
+
+    """ If service is not available locally, picak a random
+        endpoint for the service from the list
+    """
+    service = services[randint(0, len(services) - 1)]
+    endpoint = '{}:{}'.format(service['ServiceAddress'],
+                              service['ServicePort'])
+
+    return endpoint
+
+
+def get_healthy_instances(consul_endpoint, service_name=None,
+                          number_of_expected_services=None):
+    """
+    Verify in consul if any service is healthy
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of service to check, optional
+    :param number_of_expected_services number of services to check for, optional
+    :return: true if healthy, false otherwise
+    """
+
+    def check_health(service):
+        _, serv_health = consul.health.service(service, passing=True)
+        return not serv_health == []
+
+    consul = connect_to_consult(consul_endpoint)
+
+    if service_name is not None:
+        return check_health(service_name)
+
+    services = get_all_services(consul_endpoint)
+
+    items = services.keys()
+
+    if number_of_expected_services is not None and \
+                    len(items) != number_of_expected_services:
+        return False
+
+    for item in items:
+        if not check_health(item):
+            return False
+
+    return True
+
+
+if __name__ == '__main__':
+    # print get_endpoint_from_consul('10.100.198.220:8500', 'kafka')
+    # print get_healthy_instances('10.100.198.220:8500', 'voltha-health')
+    # print get_healthy_instances('10.100.198.220:8500')
+    get_all_instances_of_service('10.100.198.220:8500', 'voltha-grpc')
diff --git a/adapters/common/utils/deferred_utils.py b/adapters/common/utils/deferred_utils.py
new file mode 100644
index 0000000..3c55c1a
--- /dev/null
+++ b/adapters/common/utils/deferred_utils.py
@@ -0,0 +1,56 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+from twisted.internet.error import AlreadyCalled
+
+
+class TimeOutError(Exception): pass
+
+
+class DeferredWithTimeout(Deferred):
+    """
+    Deferred with a timeout. If neither the callback nor the errback method
+    is called within the given time, the deferred's errback will be called
+    with a TimeOutError() exception.
+
+    All other uses are the same as for Deferred().
+    """
+    def __init__(self, timeout=1.0):
+        Deferred.__init__(self)
+        self._timeout = timeout
+        self.timer = reactor.callLater(timeout, self.timed_out)
+
+    def timed_out(self):
+        self.errback(
+            TimeOutError('timed out after {} seconds'.format(self._timeout)))
+
+    def callback(self, result):
+        self._cancel_timer()
+        return Deferred.callback(self, result)
+
+    def errback(self, fail):
+        self._cancel_timer()
+        return Deferred.errback(self, fail)
+
+    def cancel(self):
+        self._cancel_timer()
+        return Deferred.cancel(self)
+
+    def _cancel_timer(self):
+        try:
+            self.timer.cancel()
+        except AlreadyCalled:
+            pass
+
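+
+if __name__ == '__main__':
+    # Usage sketch (assumes a running reactor; not part of the adapter code
+    # paths): the errback fires with TimeOutError when nothing calls back
+    # within the given window.
+    def _on_timeout(failure):
+        failure.trap(TimeOutError)
+        print('got expected timeout: {}'.format(failure.value))
+        reactor.stop()
+
+    d = DeferredWithTimeout(timeout=0.5)
+    d.addErrback(_on_timeout)
+    reactor.run()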
diff --git a/adapters/common/utils/dockerhelpers.py b/adapters/common/utils/dockerhelpers.py
new file mode 100644
index 0000000..4620aef
--- /dev/null
+++ b/adapters/common/utils/dockerhelpers.py
@@ -0,0 +1,75 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Some docker related convenience functions
+"""
+import os
+from structlog import get_logger
+
+from docker import Client
+
+
+docker_socket = os.environ.get('DOCKER_SOCK', 'unix://tmp/docker.sock')
+log = get_logger()
+
+def get_my_containers_name():
+    """
+    Return the name of the docker container in which this process is running.
+    To look up the container name, we use the container ID extracted from the
+    $HOSTNAME environment variable (which is set by docker conventions).
+    :return: String with the docker container name (the underlying docker
+             lookup raises on failure)
+    """
+    my_container_id = os.environ.get('HOSTNAME', None)
+
+    try:
+        docker_cli = Client(base_url=docker_socket)
+        info = docker_cli.inspect_container(my_container_id)
+
+    except Exception as e:
+        log.exception('failed', my_container_id=my_container_id, e=e)
+        raise
+
+    name = info['Name'].lstrip('/')
+
+    return name
+
+def get_all_running_containers():
+    try:
+        docker_cli = Client(base_url=docker_socket)
+        containers = docker_cli.containers()
+
+    except Exception as e:
+        log.exception('failed', e=e)
+        raise
+
+    return containers
+
+def inspect_container(id):
+    try:
+        docker_cli = Client(base_url=docker_socket)
+        info = docker_cli.inspect_container(id)
+    except Exception as e:
+        log.exception('failed-inspect-container', id=id, e=e)
+        raise
+
+    return info
+
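+
+if __name__ == '__main__':
+    # Usage sketch: assumes the docker socket referenced by DOCKER_SOCK is
+    # mounted into this container; the calls below raise otherwise.
+    print('running in container: {}'.format(get_my_containers_name()))
+    for container in get_all_running_containers():
+        print('{} {}'.format(container['Id'][:12], container.get('Image')))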
diff --git a/adapters/common/utils/grpc_utils.py b/adapters/common/utils/grpc_utils.py
new file mode 100644
index 0000000..8df630e
--- /dev/null
+++ b/adapters/common/utils/grpc_utils.py
@@ -0,0 +1,109 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Utilities to handle gRPC server and client side code in a Twisted environment
+"""
+import structlog
+from concurrent.futures import Future
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+from twisted.python.threadable import isInIOThread
+
+
+log = structlog.get_logger()
+
+
+def twisted_async(func):
+    """
+    This decorator can be used to implement a gRPC method on the twisted
+    thread, allowing asynchronous programming in Twisted while serving
+    a gRPC call.
+
+    gRPC methods normally are called on the futures.ThreadPool threads,
+    so these methods cannot directly use Twisted protocol constructs.
+    If the implementation of the methods needs to touch Twisted, it is
+    safer (or mandatory) to wrap the method with this decorator, which
+    schedules the inner method on the Twisted reactor thread and passes
+    its result back to the calling (gRPC worker) thread.
+
+    Example usage:
+
+    When implementing a gRPC server, typical pattern is:
+
+    class SpamService(SpamServicer):
+
+        def GetBadSpam(self, request, context):
+            '''this is called from a ThreadPoolExecutor thread'''
+            # generally unsafe to make Twisted calls
+
+        @twisted_async
+        def GetSpamSafely(self, request, context):
+            '''this method now is executed on the Twisted main thread'''
+            # safe to call any Twisted protocol functions
+
+        @twisted_async
+        @inlineCallbacks
+        def GetAsyncSpam(self, request, context):
+            '''this generator can use inlineCallbacks Twisted style'''
+            result = yield some_async_twisted_call(request)
+            returnValue(result)
+
+    """
+    def in_thread_wrapper(*args, **kw):
+
+        if isInIOThread():
+            return func(*args, **kw)
+
+        f = Future()
+
+        def twisted_wrapper():
+            try:
+                d = func(*args, **kw)
+                if isinstance(d, Deferred):
+
+                    def _done(result):
+                        # set_result() completes the future; no extra call
+                        # is needed (Future.done() is only a status query)
+                        f.set_result(result)
+
+                    def _error(e):
+                        f.set_exception(e)
+
+                    d.addCallback(_done)
+                    d.addErrback(_error)
+
+                else:
+                    f.set_result(d)
+
+            except Exception as e:
+                f.set_exception(e)
+
+        reactor.callFromThread(twisted_wrapper)
+        try:
+            result = f.result()
+        except Exception as e:
+            log.exception(e=e, func=func, args=args, kw=kw)
+            raise
+
+        return result
+
+    return in_thread_wrapper
+
+
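+# Wiring sketch (hypothetical names: spam_pb2_grpc is a generated module and
+# SpamService the servicer from the docstring above; neither is part of this
+# commit). The gRPC server owns a thread pool while the Twisted reactor owns
+# the main thread, and @twisted_async bridges the two:
+#
+#     import grpc
+#     from concurrent.futures import ThreadPoolExecutor
+#
+#     server = grpc.server(ThreadPoolExecutor(max_workers=10))
+#     spam_pb2_grpc.add_SpamServicer_to_server(SpamService(), server)
+#     server.add_insecure_port('[::]:50051')
+#     server.start()   # gRPC worker threads accept calls...
+#     reactor.run()    # ...and decorated methods run on this thread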
diff --git a/adapters/common/utils/id_generation.py b/adapters/common/utils/id_generation.py
new file mode 100644
index 0000000..e0fea1c
--- /dev/null
+++ b/adapters/common/utils/id_generation.py
@@ -0,0 +1,116 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# """ ID generation utils """
+
+from uuid import uuid4
+
+
+BROADCAST_CORE_ID=hex(0xFFFF)[2:]
+
+def get_next_core_id(current_id_in_hex_str):
+    """
+    :param current_id_in_hex_str: a hex string of the maximum core id 
+    assigned without the leading 0x characters
+    :return: current_id_in_hex_str + 1 in hex string 
+    """
+    if not current_id_in_hex_str or current_id_in_hex_str == '':
+        return '0001'
+    else:
+        return format(int(current_id_in_hex_str, 16) + 1, '04x')
+
+
+def create_cluster_logical_device_ids(core_id, switch_id):
+    """
+    Creates a logical device id and an OpenFlow datapath id that are unique
+    across the Voltha cluster.
+    The returned logical device id represents a 64-bit integer where the
+    lower 48 bits are the switch id and the upper 16 bits are the core id.
+    For the datapath id the core id is set to '0000' as it is not used for
+    Voltha core routing
+    :param core_id: string
+    :param switch_id:int
+    :return: cluster logical device id and OpenFlow datapath id
+    """
+    switch_id = format(switch_id, '012x')
+    core_in_hex = format(int(core_id, 16), '04x')
+    ld_id = '{}{}'.format(core_in_hex[-4:], switch_id[-12:])
+    dpid_id = '{}{}'.format('0000', switch_id[-12:])
+    return ld_id, int(dpid_id, 16)
+
+def is_broadcast_core_id(id):
+    assert id and len(id) == 16
+    return id[:4] == BROADCAST_CORE_ID
+
+def create_empty_broadcast_id():
+    """
+    Returns an empty broadcast id (ffff000000000000). The id is used to
+    dispatch xPON objects across all the Voltha instances.
+    :return: An empty broadcast id
+    """
+    return '{}{}'.format(BROADCAST_CORE_ID, '0'*12)
+
+def create_cluster_id():
+    """
+    Returns an id that is common across all voltha instances.  The id
+    is a str of 64 bits.  The lower 48 bits refer to an id specific to that
+    object while the upper 16 bits refer to the broadcast core_id
+    :return: A common id across all Voltha instances
+    """
+    return '{}{}'.format(BROADCAST_CORE_ID, uuid4().hex[:12])
+
+def create_cluster_device_id(core_id):
+    """
+    Creates a device id that is unique across the Voltha cluster.
+    The device id is a str of 64 bits.  The lower 48 bits refer to the
+    device id while the upper 16 bits refer to the core id.
+    :param core_id: string
+    :return: cluster device id
+    """
+    # core_id is a hex string (e.g. '0001'), as elsewhere in this module
+    return '{}{}'.format(format(int(core_id, 16), '04x'), uuid4().hex[:12])
+
+
+def get_core_id_from_device_id(device_id):
+    # Device id is a string and the first 4 characters represent the core_id
+    assert device_id and len(device_id) == 16
+    # Return the leading 4 hex characters
+    return device_id[:4]
+
+
+def get_core_id_from_logical_device_id(logical_device_id):
+    """ 
+    Logical Device id is a string and the first 4 characters represent the 
+    core_id
+    :param logical_device_id: 
+    :return: core_id string
+    """
+    assert logical_device_id and len(logical_device_id) == 16
+    # Return the leading 4 hex characters
+    return logical_device_id[:4]
+
+
+def get_core_id_from_datapath_id(datapath_id):
+    """
+    datapath id is a uint64 where:
+        - low 48 bits -> switch_id
+        - high 16 bits -> core id
+    :param datapath_id: 
+    :return: core_id string
+    """
+    assert datapath_id
+    # Get the hex string and remove the '0x' prefix
+    id_in_hex_str = hex(datapath_id)[2:]
+    assert len(id_in_hex_str) > 12
+    return id_in_hex_str[:-12]
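+
+
+if __name__ == '__main__':
+    # Worked example of the id layout (16 hex characters = 64 bits):
+    print(get_next_core_id('0001'))                     # '0002'
+    ld_id, dpid = create_cluster_logical_device_ids('0001', 1)
+    print(ld_id)                                        # '0001000000000001'
+    print(dpid)                                         # 1
+    device_id = create_cluster_device_id('0001')
+    print(get_core_id_from_device_id(device_id))        # '0001'
+    print(is_broadcast_core_id(create_empty_broadcast_id()))  # True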
diff --git a/adapters/common/utils/indexpool.py b/adapters/common/utils/indexpool.py
new file mode 100644
index 0000000..858cb3a
--- /dev/null
+++ b/adapters/common/utils/indexpool.py
@@ -0,0 +1,64 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from bitstring import BitArray
+import structlog
+
+log = structlog.get_logger()
+
+class IndexPool(object):
+    def __init__(self, max_entries, offset):
+        self.max_entries = max_entries
+        self.offset = offset
+        self.indices = BitArray(self.max_entries)
+
+    def get_next(self):
+        try:
+            _pos = self.indices.find('0b0')
+            self.indices.set(1, _pos)
+            return self.offset + _pos[0]
+        except IndexError:
+            log.info("exception-fail-to-allocate-id-all-bits-in-use")
+            return None
+
+    def allocate(self, index):
+        try:
+            _pos = index - self.offset
+            if not (0 <= _pos < self.max_entries):
+                log.info("{}-out-of-range".format(index))
+                return None
+            if self.indices[_pos]:
+                log.info("{}-is-already-allocated".format(index))
+                return None
+            self.indices.set(1, _pos)
+            return index
+
+        except IndexError:
+            return None
+
+    def release(self, index):
+        index -= self.offset
+        _pos = (index,)
+        try:
+            self.indices.set(0, _pos)
+        except IndexError:
+            log.info("bit-position-{}-out-of-range".format(index))
+
+    # Pre-allocate one or more indices (the argument must be a tuple); all
+    # of them are marked as allocated (their bits set to 1)
+    def pre_allocate(self, index):
+        if isinstance(index, tuple):
+            index = tuple(i - self.offset for i in index)
+            self.indices.set(1, index)
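+
+
+if __name__ == '__main__':
+    # Usage sketch: allocate from a pool of 8 indices starting at 100.
+    pool = IndexPool(max_entries=8, offset=100)
+    print(pool.get_next())     # 100
+    print(pool.allocate(101))  # 101
+    print(pool.get_next())     # 102 (100 and 101 are taken)
+    pool.release(100)
+    print(pool.get_next())     # 100 is free again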
diff --git a/adapters/common/utils/json_format.py b/adapters/common/utils/json_format.py
new file mode 100644
index 0000000..c18d013
--- /dev/null
+++ b/adapters/common/utils/json_format.py
@@ -0,0 +1,105 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Monkey patched json_format to allow best effort decoding of Any fields.
+Use the additional flag (strict_any_handling=False) to trigger the
+best-effort behavior. Omit the flag, or just use the original json_format
+module for the strict behavior.
+"""
+
+from google.protobuf import json_format
+
+class _PatchedPrinter(json_format._Printer):
+
+    def __init__(self, including_default_value_fields=False,
+                 preserving_proto_field_name=False,
+                 strict_any_handling=False):
+        super(_PatchedPrinter, self).__init__(including_default_value_fields,
+                                              preserving_proto_field_name)
+        self.strict_any_handling = strict_any_handling
+
+    def _BestEffortAnyMessageToJsonObject(self, msg):
+        try:
+            res = self._AnyMessageToJsonObject(msg)
+        except TypeError:
+            res = self._RegularMessageToJsonObject(msg, {})
+        return res
+
+
+def MessageToDict(message,
+                  including_default_value_fields=False,
+                  preserving_proto_field_name=False,
+                  strict_any_handling=False):
+    """Converts protobuf message to a JSON dictionary.
+
+    Args:
+      message: The protocol buffers message instance to serialize.
+      including_default_value_fields: If True, singular primitive fields,
+          repeated fields, and map fields will always be serialized.  If
+          False, only serialize non-empty fields.  Singular message fields
+          and oneof fields are not affected by this option.
+      preserving_proto_field_name: If True, use the original proto field
+          names as defined in the .proto file. If False, convert the field
+          names to lowerCamelCase.
+      strict_any_handling: If True, conversion will error out (like in the
+          original method) if an Any field with value for which the Any type
+          is not loaded is encountered. If False, the conversion will leave
+          the field un-packed, but otherwise will continue.
+
+    Returns:
+      A dict representation of the JSON formatted protocol buffer message.
+    """
+    printer = _PatchedPrinter(including_default_value_fields,
+                              preserving_proto_field_name,
+                              strict_any_handling=strict_any_handling)
+    # pylint: disable=protected-access
+    return printer._MessageToJsonObject(message)
+
+
+def MessageToJson(message,
+                  including_default_value_fields=False,
+                  preserving_proto_field_name=False,
+                  strict_any_handling=False):
+  """Converts protobuf message to JSON format.
+
+  Args:
+    message: The protocol buffers message instance to serialize.
+    including_default_value_fields: If True, singular primitive fields,
+        repeated fields, and map fields will always be serialized.  If
+        False, only serialize non-empty fields.  Singular message fields
+        and oneof fields are not affected by this option.
+    preserving_proto_field_name: If True, use the original proto field
+        names as defined in the .proto file. If False, convert the field
+        names to lowerCamelCase.
+    strict_any_handling: If True, conversion will error out (like in the
+        original method) if an Any field with value for which the Any type
+        is not loaded is encountered. If False, the conversion will leave
+        the field un-packed, but otherwise will continue.
+
+  Returns:
+    A string containing the JSON formatted protocol buffer message.
+  """
+  printer = _PatchedPrinter(including_default_value_fields,
+                            preserving_proto_field_name,
+                            strict_any_handling=strict_any_handling)
+  return printer.ToJsonString(message)
+
+
+json_format._WKTJSONMETHODS['google.protobuf.Any'] = [
+    '_BestEffortAnyMessageToJsonObject',
+    '_ConvertAnyMessage'
+]
+
+json_format._Printer._BestEffortAnyMessageToJsonObject = \
+    json_format._Printer._AnyMessageToJsonObject
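+
+# Usage sketch (hypothetical my_pb2 module whose Envelope message carries a
+# google.protobuf.Any field; not part of this commit):
+#
+#     msg = my_pb2.Envelope()
+#     msg.payload.Pack(some_message)
+#     # best-effort decoding: an Any whose type is not loaded is left
+#     # un-unpacked instead of raising, per the module docstring above
+#     d = MessageToDict(msg, strict_any_handling=False)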
diff --git a/adapters/common/utils/message_queue.py b/adapters/common/utils/message_queue.py
new file mode 100644
index 0000000..2b4257a
--- /dev/null
+++ b/adapters/common/utils/message_queue.py
@@ -0,0 +1,89 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from twisted.internet.defer import Deferred
+from twisted.internet.defer import succeed
+
+
+class MessageQueue(object):
+    """
+    An event driven queue, similar to twisted.internet.defer.DeferredQueue
+    but which allows selective dequeuing based on a predicate function.
+    Unlike DeferredQueue, there is no limit on the backlog of messages and
+    no limit on the number of pending getters.
+    """
+
+    def __init__(self):
+        self.waiting = []  # tuples of (d, predicate)
+        self.queue = []  # messages piling up here if no one is waiting
+
+    def reset(self):
+        """
+        Purge all content as well as waiters (by errback-ing their entries).
+        :return: None
+        """
+        for d, _ in self.waiting:
+            d.errback(Exception('message queue reset() was called'))
+        self.waiting = []
+        self.queue = []
+
+    def _cancelGet(self, d):
+        """
+        Remove a deferred from our waiting list.
+        :param d: The deferred that has been canceled.
+        :return: None
+        """
+        for i in range(len(self.waiting)):
+            if self.waiting[i][0] is d:
+                self.waiting.pop(i)
+                return
+
+    def put(self, obj):
+        """
+        Add an object to this queue
+        :param obj: arbitrary object that will be added to the queue
+        :return: None
+        """
+
+        # if someone is waiting for this, return right away
+        for i in range(len(self.waiting)):
+            d, predicate = self.waiting[i]
+            if predicate is None or predicate(obj):
+                self.waiting.pop(i)
+                d.callback(obj)
+                return
+
+        # otherwise...
+        self.queue.append(obj)
+
+    def get(self, predicate=None):
+        """
+        Attempt to retrieve and remove an object from the queue that
+        matches the optional predicate.
+        :return: Deferred which fires with the next object available.
+        If predicate was provided, only objects for which
+        predicate(obj) is True will be considered.
+        """
+        for i in range(len(self.queue)):
+            msg = self.queue[i]
+            if predicate is None or predicate(msg):
+                self.queue.pop(i)
+                return succeed(msg)
+
+        # there were no matching entries if we got here, so we wait
+        d = Deferred(canceller=self._cancelGet)
+        self.waiting.append((d, predicate))
+        return d
+
+
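+if __name__ == '__main__':
+    # Usage sketch: selective dequeue with a predicate.
+    def show(msg):
+        print('dequeued: {}'.format(msg))
+
+    q = MessageQueue()
+    q.put('not-an-int')
+    q.put(42)
+    # fires immediately with 42; the string stays queued for other getters
+    q.get(predicate=lambda m: isinstance(m, int)).addCallback(show)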
diff --git a/adapters/common/utils/nethelpers.py b/adapters/common/utils/nethelpers.py
new file mode 100644
index 0000000..b17aced
--- /dev/null
+++ b/adapters/common/utils/nethelpers.py
@@ -0,0 +1,86 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Some network related convenience functions
+"""
+
+from netifaces import AF_INET
+
+import netifaces as ni
+import netaddr
+
+
+def _get_all_interfaces():
+    m_interfaces = []
+    for iface in ni.interfaces():
+        m_interfaces.append((iface, ni.ifaddresses(iface)))
+    return m_interfaces
+
+
+def _get_my_primary_interface():
+    gateways = ni.gateways()
+    assert 'default' in gateways, \
+        ("No default gateway on host/container, "
+         "cannot determine primary interface")
+    default_gw_index = gateways['default'].keys()[0]
+    # gateways[default_gw_index] has the format (example):
+    # [('10.15.32.1', 'en0', True)]
+    interface_name = gateways[default_gw_index][0][1]
+    return interface_name
+
+
+def get_my_primary_local_ipv4(inter_core_subnet=None, ifname=None):
+    if not inter_core_subnet:
+        return _get_my_primary_local_ipv4(ifname)
+    # My IP should belong to the specified subnet
+    for iface in ni.interfaces():
+        addresses = ni.ifaddresses(iface)
+        if AF_INET in addresses:
+            m_ip = addresses[AF_INET][0]['addr']
+            _ip = netaddr.IPAddress(m_ip).value
+            m_network = netaddr.IPNetwork(inter_core_subnet)
+            if _ip >= m_network.first and _ip <= m_network.last:
+                return m_ip
+    return None
+
+
+def get_my_primary_interface(pon_subnet=None):
+    if not pon_subnet:
+        return _get_my_primary_interface()
+    # My interface should have an IP that belongs to the specified subnet
+    for iface in ni.interfaces():
+        addresses = ni.ifaddresses(iface)
+        if AF_INET in addresses:
+            m_ip = addresses[AF_INET][0]['addr']
+            m_ip = netaddr.IPAddress(m_ip).value
+            m_network = netaddr.IPNetwork(pon_subnet)
+            if m_ip >= m_network.first and m_ip <= m_network.last:
+                return iface
+    return None
+
+
+def _get_my_primary_local_ipv4(ifname=None):
+    try:
+        ifname = get_my_primary_interface() if ifname is None else ifname
+        addresses = ni.ifaddresses(ifname)
+        ipv4 = addresses[AF_INET][0]['addr']
+        return ipv4
+    except Exception:
+        return None
+
+if __name__ == '__main__':
+    print(get_my_primary_local_ipv4())
diff --git a/adapters/common/utils/ordered_weakvalue_dict.py b/adapters/common/utils/ordered_weakvalue_dict.py
new file mode 100644
index 0000000..9ea739a
--- /dev/null
+++ b/adapters/common/utils/ordered_weakvalue_dict.py
@@ -0,0 +1,48 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from _weakref import ref
+from weakref import KeyedRef
+from collections import OrderedDict
+
+
+class OrderedWeakValueDict(OrderedDict):
+    """
+    Modified OrderedDict to use weak references as values. Entries disappear
+    automatically if the referred value has no more strong reference pointing
+    to it.
+
+    Warning, this is not a complete implementation, only what is needed for
+    now. See test_ordered_weakvalue_dict.py for the tested behavior.
+    """
+    def __init__(self, *args, **kw):
+        def remove(wr, selfref=ref(self)):
+            self = selfref()
+            if self is not None:
+                super(OrderedWeakValueDict, self).__delitem__(wr.key)
+        self._remove = remove
+        super(OrderedWeakValueDict, self).__init__(*args, **kw)
+
+    def __setitem__(self, key, value):
+        super(OrderedWeakValueDict, self).__setitem__(
+            key, KeyedRef(value, self._remove, key))
+
+    def __getitem__(self, key):
+        o = super(OrderedWeakValueDict, self).__getitem__(key)()
+        if o is None:
+            raise KeyError(key)
+        else:
+            return o
+
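+
+if __name__ == '__main__':
+    # Usage sketch: the entry disappears once the last strong reference to
+    # the value is dropped (immediately in CPython; GC-dependent elsewhere).
+    class _Obj(object):
+        pass
+
+    d = OrderedWeakValueDict()
+    obj = _Obj()
+    d['key'] = obj
+    print(d['key'] is obj)  # True
+    del obj
+    print('key' in d)       # False: the weakref died and the entry is gone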
diff --git a/adapters/common/utils/registry.py b/adapters/common/utils/registry.py
new file mode 100644
index 0000000..270bd71
--- /dev/null
+++ b/adapters/common/utils/registry.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Simple component registry to provide centralized access to any registered
+components.
+"""
+from collections import OrderedDict
+from zope.interface import Interface
+
+
+class IComponent(Interface):
+    """
+    A Voltha Component
+    """
+
+    def start():
+        """
+        Called once the component is instantiated. Can be used for async
+        initialization.
+        :return: (None or Deferred)
+        """
+
+    def stop():
+        """
+        Called once before the component is unloaded. Can be used for async
+        cleanup operations.
+        :return: (None or Deferred)
+        """
+
+
+class Registry(object):
+
+    def __init__(self):
+        self.components = OrderedDict()
+
+    def register(self, name, component):
+        assert IComponent.providedBy(component)
+        assert name not in self.components
+        self.components[name] = component
+        return component
+
+    def unregister(self, name):
+        if name in self.components:
+            del self.components[name]
+
+    def __call__(self, name):
+        return self.components[name]
+
+    def iterate(self):
+        return self.components.values()
+
+
+# public shared registry
+registry = Registry()
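+
+
+if __name__ == '__main__':
+    # Usage sketch: register a minimal component and look it up again.
+    from zope.interface import implementer
+
+    @implementer(IComponent)
+    class _Dummy(object):
+        def start(self):
+            pass
+
+        def stop(self):
+            pass
+
+    registry.register('dummy', _Dummy())
+    print(registry('dummy'))         # the registered instance
+    print(list(registry.iterate()))  # all registered components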
diff --git a/adapters/docker/Dockerfile.base b/adapters/docker/Dockerfile.base
new file mode 100644
index 0000000..a50a3ee
--- /dev/null
+++ b/adapters/docker/Dockerfile.base
@@ -0,0 +1,34 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:xenial
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Update to have latest images
+RUN apt-get update && \
+    apt-get install -y python python-pip openssl iproute2 libpcap-dev wget
+
+COPY adapters/requirements.txt /tmp/requirements.txt
+
+# pip install cython enum34 six && \
+# Install app dependencies
+RUN wget https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
+    dpkg -i *.deb && \
+    rm -f *.deb && \
+    apt-get update && \
+    apt-get install -y wget build-essential make gcc binutils python-dev libffi-dev libssl-dev git && \
+    pip install -r /tmp/requirements.txt && \
+    apt-get purge -y wget build-essential make gcc binutils python-dev libffi-dev libssl-dev git && \
+    apt-get autoremove -y
diff --git a/adapters/docker/Dockerfile.ponsim_adapter_olt b/adapters/docker/Dockerfile.ponsim_adapter_olt
new file mode 100644
index 0000000..5b714f1
--- /dev/null
+++ b/adapters/docker/Dockerfile.ponsim_adapter_olt
@@ -0,0 +1,42 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ARG TAG=latest
+ARG REGISTRY=
+ARG REPOSITORY=
+
+FROM ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} as protos
+FROM ${REGISTRY}${REPOSITORY}voltha-base:${TAG}
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Bundle app source
+RUN mkdir /adapters && touch /adapters/__init__.py
+ENV PYTHONPATH=/adapters
+COPY adapters/common /adapters/adapters/common
+COPY adapters/kafka /adapters/adapters/kafka
+COPY adapters/*.py /adapters/adapters/
+#COPY pki /voltha/pki
+COPY adapters/ponsim_olt /adapters/adapters/ponsim_olt
+RUN touch /adapters/adapters/__init__.py
+
+
+# Copy in the generated GRPC proto code
+COPY --from=protos /protos/voltha /adapters/adapters/protos
+COPY --from=protos /protos/google/api /adapters/adapters/protos/third_party/google/api
+COPY adapters/protos/third_party/__init__.py /adapters/adapters/protos/third_party
+RUN touch /adapters/adapters/protos/__init__.py
+RUN touch /adapters/adapters/protos/third_party/google/__init__.py
+
+# Exposing process and default entry point
+# CMD ["python", "/adapters/ponsim_olt/main.py"]
diff --git a/adapters/docker/Dockerfile.ponsim_adapter_onu b/adapters/docker/Dockerfile.ponsim_adapter_onu
new file mode 100644
index 0000000..57cc113
--- /dev/null
+++ b/adapters/docker/Dockerfile.ponsim_adapter_onu
@@ -0,0 +1,42 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ARG TAG=latest
+ARG REGISTRY=
+ARG REPOSITORY=
+
+FROM ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} as protos
+FROM ${REGISTRY}${REPOSITORY}voltha-base:${TAG}
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Bundle app source
+RUN mkdir /adapters && touch /adapters/__init__.py
+ENV PYTHONPATH=/adapters
+COPY adapters/common /adapters/adapters/common
+COPY adapters/kafka /adapters/adapters/kafka
+COPY adapters/*.py /adapters/adapters/
+#COPY pki /voltha/pki
+COPY adapters/ponsim_onu /adapters/adapters/ponsim_onu
+RUN touch /adapters/adapters/__init__.py
+
+
+# Copy in the generated GRPC proto code
+COPY --from=protos /protos/voltha /adapters/adapters/protos
+COPY --from=protos /protos/google/api /adapters/adapters/protos/third_party/google/api
+COPY adapters/protos/third_party/__init__.py /adapters/adapters/protos/third_party
+RUN touch /adapters/adapters/protos/__init__.py
+RUN touch /adapters/adapters/protos/third_party/google/__init__.py
+
+# Exposing process and default entry point
+# CMD ["python", "/adapters/ponsim_onu/main.py"]
diff --git a/adapters/docker/Dockerfile.protoc b/adapters/docker/Dockerfile.protoc
new file mode 100644
index 0000000..eef6f54
--- /dev/null
+++ b/adapters/docker/Dockerfile.protoc
@@ -0,0 +1,39 @@
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG REGISTRY=
+ARG PROTOC_PREFIX=/usr/local
+ARG PROTOC_LIBDIR=${PROTOC_PREFIX}/lib
+ARG PROTOC=${PROTOC_PREFIX}/bin/protoc
+ARG PROTOC_VERSION=3.3.0
+
+FROM ${REGISTRY}debian:stretch-slim
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+ENV PROTOC_PREFIX=/usr/local
+ENV PROTOC_LIBDIR=${PROTOC_PREFIX}/lib
+ENV PROTOC=${PROTOC_PREFIX}/bin/protoc
+ENV PROTOC_VERSION=3.3.0
+ENV PROTOC_DOWNLOAD_PREFIX=https://github.com/google/protobuf/releases/download
+ENV PROTOC_DIR=protobuf-${PROTOC_VERSION}
+ENV PROTOC_TARBALL=protobuf-python-${PROTOC_VERSION}.tar.gz
+ENV PROTOC_DOWNLOAD_URI=${PROTOC_DOWNLOAD_PREFIX}/v${PROTOC_VERSION}/${PROTOC_TARBALL}
+
+RUN apt-get update -y && apt-get install -y wget build-essential python-dev python-pip
+RUN pip install grpcio-tools==1.3.5
+WORKDIR /build
+RUN wget -q --no-check-certificate ${PROTOC_DOWNLOAD_URI}
+RUN tar --strip-components=1 -zxf ${PROTOC_TARBALL}
+RUN ./configure --prefix=${PROTOC_PREFIX}
+RUN make install
diff --git a/adapters/docker/Dockerfile.protos b/adapters/docker/Dockerfile.protos
new file mode 100644
index 0000000..27e3db4
--- /dev/null
+++ b/adapters/docker/Dockerfile.protos
@@ -0,0 +1,35 @@
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG REGISTRY=
+ARG REPOSITORY=
+ARG TAG=latest
+
+FROM ${REGISTRY}${REPOSITORY}voltha-protoc:${TAG} as builder
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+COPY adapters/protos/third_party/google/api/*.proto /protos/google/api/
+COPY adapters/docker/config/Makefile.protos /protos/google/api/Makefile.protos
+WORKDIR /protos
+RUN make -f google/api/Makefile.protos google_api
+RUN touch /protos/google/__init__.py /protos/google/api/__init__.py
+
+COPY protos/*.proto /protos/voltha/
+COPY adapters/docker/config/Makefile.protos /protos/voltha/Makefile.protos
+WORKDIR /protos/voltha
+RUN make -f Makefile.protos build
+
+# Copy the files to a scratch-based container to minimize its size
+FROM ${REGISTRY}scratch
+COPY --from=builder /protos/ /protos/
diff --git a/adapters/docker/config/Makefile.protos b/adapters/docker/config/Makefile.protos
new file mode 100644
index 0000000..12ff9e3
--- /dev/null
+++ b/adapters/docker/config/Makefile.protos
@@ -0,0 +1,59 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Makefile to build all protobuf and gRPC related artifacts
+
+default: build
+
+PROTO_FILES := $(wildcard *.proto)
+PROTO_ALL_FILES := $(PROTO_FILES) $(PROTO_GOOGLE_API)
+PROTO_PB2_FILES := $(foreach f,$(PROTO_FILES),$(subst .proto,_pb2.py,$(f)))
+PROTO_PB2_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,_pb2.py,$(f)))
+PROTO_ALL_PB2_C_FILES := $(foreach f,$(PROTO_ALL_FILES),$(subst .proto,_pb2.pyc,$(f)))
+PROTO_ALL_PB2_GRPC_FILES := $(foreach f,$(PROTO_ALL_FILES),$(subst .proto,_pb2_grpc.py,$(f)))
+PROTO_ALL_DESC_FILES := $(foreach f,$(PROTO_ALL_FILES),$(subst .proto,.desc,$(f)))
+
+# Google API needs to be built from within the third party directory
+#
+google_api:
+	python -m grpc.tools.protoc \
+	    -I. \
+            --python_out=. \
+            --grpc_python_out=. \
+            --descriptor_set_out=google/api/annotations.desc \
+            --include_imports \
+            --include_source_info \
+        google/api/annotations.proto google/api/http.proto
+
+build: $(PROTO_PB2_FILES)
+
+%_pb2.py: %.proto
+	python -m grpc.tools.protoc \
+                -I. \
+                -I/protos \
+                --python_out=. \
+                --grpc_python_out=. \
+                --descriptor_set_out=$(basename $<).desc \
+                --include_imports \
+                --include_source_info \
+                $<
+
+clean:
+	rm -f $(PROTO_PB2_FILES) \
+		$(PROTO_ALL_DESC_FILES) \
+		$(PROTO_ALL_PB2_GRPC_FILES) \
+		$(PROTO_ALL_PB2_C_FILES) \
+		$(PROTO_PB2_GOOGLE_API)
diff --git a/adapters/iadapter.py b/adapters/iadapter.py
new file mode 100644
index 0000000..c6ea3ca
--- /dev/null
+++ b/adapters/iadapter.py
@@ -0,0 +1,301 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Adapter abstract base class
+"""
+
+import structlog
+from zope.interface import implementer
+from twisted.internet import reactor
+
+from adapters.protos.common_pb2 import AdminState
+from adapters.protos.device_pb2 import DeviceType, DeviceTypes
+from adapters.interface import IAdapterInterface
+from adapters.protos.adapter_pb2 import Adapter
+from adapters.protos.adapter_pb2 import AdapterConfig
+from adapters.protos.common_pb2 import LogLevel
+from adapters.protos.health_pb2 import HealthStatus
+from adapters.protos.device_pb2 import Device
+
+log = structlog.get_logger()
+
+
+@implementer(IAdapterInterface)
+class IAdapter(object):
+    def __init__(self, adapter_agent, config, device_handler_class, name,
+                 vendor, version, device_type, vendor_id,
+                 accepts_bulk_flow_update=True,
+                 accepts_add_remove_flow_updates=False, core_proxy=None):
+        log.debug('initializing-adapter', vendor=vendor, name=name, version=version)
+        self.adapter_agent = adapter_agent
+        self.core_proxy = core_proxy
+        self.config = config
+        self.name = name
+        self.supported_device_types = [
+            DeviceType(
+                id=device_type,
+                vendor_id=vendor_id,
+                adapter=name,
+                accepts_bulk_flow_update=accepts_bulk_flow_update,
+                accepts_add_remove_flow_updates=accepts_add_remove_flow_updates
+            )
+        ]
+        self.descriptor = Adapter(
+            id=self.name,
+            vendor=vendor,
+            version=version,
+            config=AdapterConfig(log_level=LogLevel.INFO)
+        )
+        self.devices_handlers = dict()  # device_id -> Olt/OnuHandler()
+        self.device_handler_class = device_handler_class
+
+    def start(self):
+        log.info('starting-adapter', name=self.name)
+
+    def stop(self):
+        log.info('stopping-adapter', name=self.name)
+
+    def adapter_descriptor(self):
+        return self.descriptor
+
+    def device_types(self):
+        return DeviceTypes(items=self.supported_device_types)
+
+    def health(self):
+        # return HealthStatus(state=HealthStatus.HealthState.HEALTHY)
+        return HealthStatus(state=HealthStatus.HEALTHY)
+
+    def change_master_state(self, master):
+        raise NotImplementedError()
+
+    def get_ofp_device_info(self, device):
+        log.debug('get_ofp_device_info', device_id=device.id)
+        return self.devices_handlers[device.id].get_ofp_device_info(device)
+
+    def get_ofp_port_info(self, device, port_no):
+        log.debug('get_ofp_port_info', device_id=device.id, port_no=port_no)
+        return self.devices_handlers[device.id].get_ofp_port_info(device, port_no)
+
+    def adopt_device(self, device):
+        log.debug('adopt_device', device_id=device.id)
+        self.devices_handlers[device.id] = self.device_handler_class(self, device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].activate, device)
+        log.debug('adopt_device_done', device_id=device.id)
+        return device
+
+    def reconcile_device(self, device):
+        raise NotImplementedError()
+
+    def abandon_device(self, device):
+        raise NotImplementedError()
+
+    def disable_device(self, device):
+        log.info('disable-device', device_id=device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].disable)
+        return device
+
+    def reenable_device(self, device):
+        log.info('reenable-device', device_id=device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].reenable)
+        return device
+
+    def reboot_device(self, device):
+        log.info('reboot-device', device_id=device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].reboot)
+        return device
+
+    def download_image(self, device, request):
+        raise NotImplementedError()
+
+    def get_image_download_status(self, device, request):
+        raise NotImplementedError()
+
+    def cancel_image_download(self, device, request):
+        raise NotImplementedError()
+
+    def activate_image_update(self, device, request):
+        raise NotImplementedError()
+
+    def revert_image_update(self, device, request):
+        raise NotImplementedError()
+
+    def self_test_device(self, device):
+        log.info('self-test-req', device_id=device.id)
+        result = reactor.callLater(0, self.devices_handlers[device.id].self_test_device)
+        return result
+
+    def delete_device(self, device):
+        log.info('delete-device', device_id=device.id)
+        #  TODO: Update the logical device mapping
+        reactor.callLater(0, self.devices_handlers[device.id].delete)
+        return device
+
+    def get_device_details(self, device):
+        raise NotImplementedError()
+
+    def update_flows_bulk(self, device, flows, groups):
+        log.info('bulk-flow-update', device_id=device.id,
+                 flows=flows, groups=groups)
+        assert len(groups.items) == 0
+        handler = self.devices_handlers[device.id]
+        return handler.update_flow_table(flows.items)
+
+    def update_flows_incrementally(self, device, flow_changes, group_changes):
+        log.info('incremental-flow-update', device_id=device.id,
+                 flows=flow_changes, groups=group_changes)
+        # For now, there is no support for group changes
+        assert len(group_changes.to_add.items) == 0
+        assert len(group_changes.to_remove.items) == 0
+
+        handler = self.devices_handlers[device.id]
+        # Remove flows
+        if len(flow_changes.to_remove.items) != 0:
+            handler.remove_from_flow_table(flow_changes.to_remove.items)
+
+        # Add flows
+        if len(flow_changes.to_add.items) != 0:
+            handler.add_to_flow_table(flow_changes.to_add.items)
+
+    def update_pm_config(self, device, pm_config):
+        log.info("adapter-update-pm-config", device=device,
+                 pm_config=pm_config)
+        handler = self.devices_handlers[device.id]
+        handler.update_pm_config(device, pm_config)
+
+    def send_proxied_message(self, proxy_address, msg):
+        raise NotImplementedError()
+
+    def receive_proxied_message(self, proxy_address, msg):
+        raise NotImplementedError()
+
+    def receive_packet_out(self, logical_device_id, egress_port_no, msg):
+        raise NotImplementedError()
+
+    def receive_inter_adapter_message(self, msg):
+        raise NotImplementedError()
+
+    def suppress_alarm(self, filter):
+        raise NotImplementedError()
+
+    def unsuppress_alarm(self, filter):
+        raise NotImplementedError()
+
+    def _get_handler(self, device):
+        # Returns the device's handler, or None if it is not registered
+        return self.devices_handlers.get(device.id)
+
+"""
+OLT Adapter base class
+"""
+class OltAdapter(IAdapter):
+    def __init__(self, adapter_agent, config, device_handler_class, name,
+                 vendor, version, device_type,
+                 accepts_bulk_flow_update=True,
+                 accepts_add_remove_flow_updates=False,
+                 core_proxy=None):
+        super(OltAdapter, self).__init__(adapter_agent=adapter_agent,
+                                         config=config,
+                                         device_handler_class=device_handler_class,
+                                         name=name,
+                                         vendor=vendor,
+                                         version=version,
+                                         device_type=device_type,
+                                         vendor_id=None,
+                                         accepts_bulk_flow_update=accepts_bulk_flow_update,
+                                         accepts_add_remove_flow_updates=accepts_add_remove_flow_updates,
+                                         core_proxy=core_proxy)
+        self.logical_device_id_to_root_device_id = dict()
+
+    def reconcile_device(self, device):
+        try:
+            self.devices_handlers[device.id] = self.device_handler_class(self, device.id)
+            # Work only required for devices that are in ENABLED state
+            if device.admin_state == AdminState.ENABLED:
+                reactor.callLater(0,
+                                  self.devices_handlers[device.id].reconcile,
+                                  device)
+            else:
+                # Invoke the children reconciliation which would setup the
+                # basic children data structures
+                self.adapter_agent.reconcile_child_devices(device.id)
+            return device
+        except Exception as e:
+            log.exception('Exception', e=e)
+
+    def send_proxied_message(self, proxy_address, msg):
+        log.info('send-proxied-message', proxy_address=proxy_address, msg=msg)
+        handler = self.devices_handlers[proxy_address.device_id]
+        handler.send_proxied_message(proxy_address, msg)
+
+    def receive_packet_out(self, logical_device_id, egress_port_no, msg):
+        def ldi_to_di(ldi):
+            di = self.logical_device_id_to_root_device_id.get(ldi)
+            if di is None:
+                logical_device = self.adapter_agent.get_logical_device(ldi)
+                di = logical_device.root_device_id
+                self.logical_device_id_to_root_device_id[ldi] = di
+            return di
+
+        device_id = ldi_to_di(logical_device_id)
+        handler = self.devices_handlers[device_id]
+        handler.packet_out(egress_port_no, msg)
+
+
+"""
+ONU Adapter base class
+"""
+
+
+class OnuAdapter(IAdapter):
+    def __init__(self, adapter_agent, config, device_handler_class, name,
+                 vendor, version, device_type, vendor_id, accepts_bulk_flow_update=True,
+                 accepts_add_remove_flow_updates=False):
+        super(OnuAdapter, self).__init__(adapter_agent=adapter_agent,
+                                         config=config,
+                                         device_handler_class=device_handler_class,
+                                         name=name,
+                                         vendor=vendor,
+                                         version=version,
+                                         device_type=device_type,
+                                         vendor_id=vendor_id,
+                                         accepts_bulk_flow_update=accepts_bulk_flow_update,
+                                         accepts_add_remove_flow_updates=accepts_add_remove_flow_updates,
+                                         core_proxy=None
+                                         )
+
+    def reconcile_device(self, device):
+        self.devices_handlers[device.id] = self.device_handler_class(self, device.id)
+        # Reconcile only if state was ENABLED
+        if device.admin_state == AdminState.ENABLED:
+            reactor.callLater(0,
+                              self.devices_handlers[device.id].reconcile,
+                              device)
+        return device
+
+    def receive_proxied_message(self, proxy_address, msg):
+        log.info('receive-proxied-message', proxy_address=proxy_address,
+                 device_id=proxy_address.device_id, msg=msg)
+        # Device_id from the proxy_address is the olt device id. We need to
+        # get the onu device id using the port number in the proxy_address
+        device = self.adapter_agent. \
+            get_child_device_with_proxy_address(proxy_address)
+        if device:
+            handler = self.devices_handlers[device.id]
+            handler.receive_message(msg)
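+
+
+# Subclassing sketch (hypothetical names; a concrete adapter supplies its own
+# handler class, whose activate() is scheduled by adopt_device() above):
+#
+#     class MyOltHandler(object):
+#         def __init__(self, adapter, device_id):
+#             self.adapter = adapter
+#             self.device_id = device_id
+#
+#         def activate(self, device):
+#             pass  # bring the physical device up here
+#
+#     class MyOltAdapter(OltAdapter):
+#         def __init__(self, adapter_agent, config):
+#             super(MyOltAdapter, self).__init__(
+#                 adapter_agent, config, MyOltHandler,
+#                 name='my_olt', vendor='Example', version='0.1',
+#                 device_type='my_olt')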
diff --git a/adapters/interface.py b/adapters/interface.py
new file mode 100644
index 0000000..860c8ba
--- /dev/null
+++ b/adapters/interface.py
@@ -0,0 +1,770 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Interface definition for Voltha Adapters
+"""
+from zope.interface import Interface
+
+
+class IAdapterInterface(Interface):
+    """
+    A Voltha adapter.  This interface is used by the Voltha Core to initiate
+    requests towards a voltha adapter.
+    """
+
+    def adapter_descriptor():
+        """
+        Return the adapter descriptor object for this adapter.
+        :return: voltha.Adapter grpc object (see voltha/protos/adapter.proto),
+        with adapter-specific information and config extensions.
+        """
+
+    def device_types():
+        """
+        Return list of device types supported by the adapter.
+        :return: voltha.DeviceTypes protobuf object, with optional type
+        specific extensions.
+        """
+
+    def health():
+        """
+        Return a 3-state health status using the voltha.HealthStatus message.
+        :return: Deferred or direct return with voltha.HealthStatus message
+        """
+
+    def adopt_device(device):
+        """
+        Make sure the adapter looks after given device. Called when a device
+        is provisioned top-down and needs to be activated by the adapter.
+        :param device: A voltha.Device object, with possible device-type
+        specific extensions. Such extensions shall be described as part of
+        the device type specification returned by device_types().
+        :return: (Deferred) Shall be fired to acknowledge device ownership.
+        """
+
+    def reconcile_device(device):
+        """
+        Make sure the adapter looks after given device. Called when this
+        device has changed ownership from another Voltha instance to
+        this one (typically, this occurs when the previous voltha
+        instance went down).
+        :param device: A voltha.Device object, with possible device-type
+        specific extensions. Such extensions shall be described as part of
+        the device type specification returned by device_types().
+        :return: (Deferred) Shall be fired to acknowledge device ownership.
+        """
+
+    def abandon_device(device):
+        """
+        Make sure the adapter no longer looks after the device. This is called
+        if device ownership is taken over by another Voltha instance.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge abandonment.
+        """
+
+    def disable_device(device):
+        """
+        This is called when a previously enabled device needs to be disabled
+        based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge disabling the device.
+        """
+
+    def reenable_device(device):
+        """
+        This is called when a previously disabled device needs to be enabled
+        based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge re-enabling the
+        device.
+        """
+
+    def reboot_device(device):
+        """
+        This is called to reboot a device based on an NBI call.  The admin
+        state of the device will not change after the reboot.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge the reboot.
+        """
+
+    def download_image(device, request):
+        """
+        This is called to request downloading a specified image into
+        the standby partition of a device based on an NBI call.
+        This call is expected to be non-blocking.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) Shall be fired to acknowledge the download.
+        """
+
+    def get_image_download_status(device, request):
+        """
+        This is called to inquire about the status of a requested image
+        download based on an NBI call. The adapter is expected to update
+        the ImageDownload DB object with the query result.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) Shall be fired to acknowledge the status request.
+        """
+
+    def cancel_image_download(device, request):
+        """
+        This is called to cancel a requested image download based on an
+        NBI call. The admin state of the device will not change after
+        the cancellation.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) Shall be fired to acknowledge the cancellation.
+        """
+
+    def activate_image_update(device, request):
+        """
+        This is called to activate a downloaded image from the standby
+        partition into the active partition. Depending on the device
+        implementation, this call may or may not cause a device reboot;
+        if it does not, a reboot is required before the activated image
+        runs on the device. This call is expected to be non-blocking.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) OperationResponse object.
+        """
+
+    def revert_image_update(device, request):
+        """
+        This is called to deactivate the image in the active partition
+        and revert to the previous image in the standby partition.
+        Depending on the device implementation, this call may or may not
+        cause a device reboot; if it does not, a reboot is required
+        before the previous image runs on the device. This call is
+        expected to be non-blocking.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) OperationResponse object.
+        """
+
+    def self_test_device(device):
+        """
+        This is called to self-test a device based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: Shall return the result of the self-test.
+        """
+
+    def delete_device(device):
+        """
+        This is called to delete a device from the PON based on an NBI call.
+        If the device is an OLT then the whole PON will be deleted.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge the deletion.
+        """
+
+    def get_device_details(device):
+        """
+        This is called to get additional device details based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge the retrieval of
+        additional details.
+        """
+
+    def update_flows_bulk(device, flows, groups):
+        """
+        Called after any flow table change, but only if the device supports
+        bulk mode, which is expressed by the 'accepts_bulk_flow_update'
+        capability attribute of the device type.
+        :param device: A Voltha.Device object.
+        :param flows: An openflow_v13.Flows object
+        :param groups: An openflow_v13.FlowGroups object
+        :return: (Deferred or None)
+        """
+
+    def update_flows_incrementally(device, flow_changes, group_changes):
+        """
+        Called after a flow table update, but only if the device supports
+        non-bulk mode, which is expressed by the 'accepts_add_remove_flow_updates'
+        capability attribute of the device type.
+        :param device: A Voltha.Device object.
+        :param flow_changes: An openflow_v13.FlowChanges object
+        :param group_changes: An openflow_v13.FlowGroupChanges object
+        :return: (Deferred or None)
+        """
+
+    def update_pm_config(device, pm_configs):
+        """
+        Called every time a request is made to change pm collection behavior
+        :param device: A Voltha.Device object
+        :param pm_configs: A Voltha.PmConfigs object
+        """
+
+    def receive_packet_out(device_id, egress_port_no, msg):
+        """
+        Pass a packet_out message content to adapter so that it can forward
+        it out to the device. This is only called on root devices.
+        :param device_id: device ID
+        :param egress_port_no: egress logical port number
+        :param msg: actual message
+        :return: None
+        """
+
+    def suppress_alarm(filter):
+        """
+        Inform an adapter that all incoming alarms should be suppressed
+        :param filter: A Voltha.AlarmFilter object.
+        :return: (Deferred) Shall be fired to acknowledge the suppression.
+        """
+
+    def unsuppress_alarm(filter):
+        """
+        Inform an adapter that all incoming alarms should resume
+        :param filter: A Voltha.AlarmFilter object.
+        :return: (Deferred) Shall be fired to acknowledge the unsuppression.
+        """
+
+    def get_ofp_device_info(device):
+        """
+        Retrieve the OLT device info. This includes the ofp_desc and
+        ofp_switch_features. Either the existing ofp structures can be
+        used, the attributes can be added to the Device definition, or a
+        new proto definition can be created. This API will allow the Core
+        to create a LogicalDevice associated with this device (OLT only).
+        :param device: device
+        :return: Proto Message (TBD)
+        """
+
+    def get_ofp_port_info(device, port_no):
+        """
+        Retrieve the port info. This includes the ofp_port. Either the
+        existing ofp structure can be used, the attributes can be added to
+        the Port definition, or a new proto definition can be created.
+        This API will allow the Core to create a LogicalPort associated
+        with this device.
+        :param device: device
+        :param port_no: port number
+        :return: Proto Message (TBD)
+        """
+
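+    # A minimal, illustrative sketch (not part of this interface) of how an
+    # adapter declares conformance; the class name and body below are
+    # hypothetical:
+    #
+    #     from twisted.internet import defer
+    #     from zope.interface import implementer
+    #
+    #     @implementer(IAdapterInterface)
+    #     class MyPonAdapter(object):
+    #         def adopt_device(self, device):
+    #             # take ownership, then acknowledge via a Deferred
+    #             return defer.succeed(device)
+    #         # ... remaining IAdapterInterface methods ...
+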
+    # def start():
+    #     """
+    #     Called once after the adapter instance is loaded. Can be used for async
+    #     initialization.
+    #     :return: (None or Deferred)
+    #     """
+    #
+    # def stop():
+    #     """
+    #     Called once before adapter is unloaded. It can be used to perform
+    #     any cleanup after the adapter.
+    #     :return: (None or Deferred)
+    #     """
+    #
+    # def receive_inter_adapter_message(msg):
+    #     """
+    #     Called when the adapter receives a message that was sent to it directly
+    #     from another adapter. An adapter may register for these messages by calling
+    #     the register_for_inter_adapter_messages() method in the adapter agent.
+    #     Note that it is the responsibility of the sending and receiving
+    #     adapters to properly encode and decode the message.
+    #     :param msg: The message contents.
+    #     :return: None
+    #     """
+    #
+    # def send_proxied_message(proxy_address, msg):
+    #     """
+    #     Forward a msg to a child device of device, addressed by the given
+    #     proxy_address=Device.ProxyAddress().
+    #     :param proxy_address: Address info for the parent device
+    #      to route the message to the child device. This was given to the
+    #      child device by the parent device at the creation of the child
+    #      device.
+    #     :param msg: (str) The actual message to send.
+    #     :return: (Deferred(None) or None) The return of this method should
+    #      indicate that the message was successfully *sent*.
+    #     """
+    #
+    # def receive_proxied_message(proxy_address, msg):
+    #     """
+    #     Pass an async message (arrived via a proxy) to this device.
+    #     :param proxy_address: Address info for the parent device
+    #      to route the message to the child device. This was given to the
+    #      child device by the parent device at the creation of the child
+    #      device. Note this is the proxy_address with which the adapter
+    #      had to register prior to receiving proxied messages.
+    #     :param msg: (str) The actual message received.
+    #     :return: None
+    #     """
+    #
+    # def receive_packet_out(logical_device_id, egress_port_no, msg):
+    #     """
+    #     Pass a packet_out message content to adapter so that it can forward it
+    #     out to the device. This is only called on root devices.
+    #     :param logical_device_id:
+    #     :param egress_port: egress logical port number
+    #     :param msg: actual message
+    #     :return: None
+    #     """
+    #
+    # def change_master_state(master):
+    #     """
+    #     Called to indicate if plugin shall assume or lose master role. The
+    #     master role can be used to perform functions that must be performed
+    #     from a single point in the cluster. In single-node deployments of
+    #     Voltha, the plugins are always in master role.
+    #     :param master: (bool) True to indicate the mastership needs to be
+    #      assumed; False to indicate that mastership needs to be abandoned.
+    #     :return: (Deferred) which is fired by the adapter when mastership is
+    #      assumed/dropped, respectively.
+    #     """
+
+
+# class IAdapterAgent(Interface):
+#     """
+#     This object is passed in to the __init__ function of each adapter,
+#     and can be used by the adapter implementation to initiate async calls
+#     toward Voltha's CORE via the APIs defined here.
+#     """
+#
+#     def get_device(device_id):
+#         # TODO add doc
+#         """"""
+#
+#     def add_device(device):
+#         # TODO add doc
+#         """"""
+#
+#     def update_device(device):
+#         # TODO add doc
+#         """"""
+#
+#     def add_port(device_id, port):
+#         # TODO add doc
+#         """"""
+#
+#     def create_logical_device(logical_device):
+#         # TODO add doc
+#         """"""
+#
+#     def add_logical_port(logical_device_id, port):
+#         # TODO add doc
+#         """"""
+#
+#     def child_device_detected(parent_device_id,
+#                               parent_port_no,
+#                               child_device_type,
+#                               proxy_address,
+#                               admin_state,
+#                               **kw):
+#         # TODO add doc
+#         """"""
+#
+#     def send_proxied_message(proxy_address, msg):
+#         """
+#         Forward a msg to a child device of device, addressed by the given
+#         proxy_address=Device.ProxyAddress().
+#         :param proxy_address: Address info for the parent device
+#          to route the message to the child device. This was given to the
+#          child device by the parent device at the creation of the child
+#          device.
+#         :param msg: (str) The actual message to send.
+#         :return: (Deferred(None) or None) The return of this method should
+#          indicate that the message was successfully *sent*.
+#         """
+#
+#     def receive_proxied_message(proxy_address, msg):
+#         """
+#         Pass an async message (arrived via a proxy) to this device.
+#         :param proxy_address: Address info for the parent device
+#          to route the message to the child device. This was given to the
+#          child device by the parent device at the creation of the child
+#          device. Note this is the proxy_address with which the adapter
+#          had to register prior to receiving proxied messages.
+#         :param msg: (str) The actual message received.
+#         :return: None
+#         """
+#
+#     def register_for_proxied_messages(proxy_address):
+#         """
+#         A child device adapter can use this to indicate its intent to
+#         receive async messages sent via a parent device. Example: an
+#         ONU adapter can use this to register for OMCI messages received
+#         via the OLT and the OLT adapter.
+#         :param child_device_address: Address info that was given to the
+#          child device by the parent device at the creation of the child
+#          device. Its uniqueness acts as a router information for the
+#          registration.
+#         :return: None
+#         """
+#
+#     def unregister_for_proxied_messages(proxy_address):
+#         """
+#         Cancel a previous registration
+#         :return:
+#         """
+#
+#     def send_packet_in(logical_device_id, logical_port_no, packet):
+#         """
+#         Forward given packet to the northbound toward an SDN controller.
+#         :param device_id: logical device identifier
+#         :param logical_port_no: logical port_no (as numbered in openflow)
+#         :param packet: the actual packet; can be a serialized string or a scapy
+#                        Packet.
+#         :return: None returned on success
+#         """
+#
+#     def submit_kpis(kpi_event_msg):
+#         """
+#         Submit KPI metrics on behalf of the OLT and its adapter. This can
+#         include hardware related metrics, usage and utilization metrics, as
+#         well as optional adapter specific metrics.
+#         :param kpi_event_msg: A protobuf message of KpiEvent type.
+#         :return: None
+#         """
+#
+#     def submit_alarm(device_id, alarm_event_msg):
+#         """
+#         Submit an alarm on behalf of the OLT and its adapter.
+#         :param alarm_event_msg: A protobuf message of AlarmEvent type.
+#         :return: None
+#         """
+#
+#     def register_for_onu_detect_state(proxy_address):
+#         """
+#
+#         :return: None
+#         """
+#
+#     def unregister_for_onu_detect_state(proxy_address):
+#         """
+#
+#         :return: None
+#         """
+#
+#     def forward_onu_detect_state(proxy_address, state):
+#         """
+#         Forward onu detect state to ONU adapter
+#         :param proxy_address: ONU device address
+#         :param state: ONU detect state (bool)
+#         :return: None
+#         """
+
+class ICoreSouthBoundInterface(Interface):
+    """
+    Represents a Voltha Core. This is used by an adapter to initiate async
+    calls towards Voltha Core.
+    """
+
+    def get_device(device_id):
+        """
+        Retrieve a device using its ID.
+        :param device_id: a device ID
+        :return: Device Object or None
+        """
+
+    def get_child_device(parent_device_id, **kwargs):
+        """
+        Retrieve a child device object belonging to the specified parent
+        device based on some match criteria. The first child device that
+        matches the provided criteria is returned.
+        :param parent_device_id: parent's device protobuf ID
+        :param **kwargs: arbitrary list of match criteria, where the value
+        in each key-value pair must be a protobuf type
+        :return: Child Device Object or None
+        """
+
+    def get_ports(device_id, port_type):
+        """
+        Retrieve all the ports of a given type of a Device.
+        :param device_id: a device ID
+        :param port_type: type of port
+        :return: Ports object
+        """
+
+    def get_child_devices(parent_device_id):
+        """
+        Get all child devices given a parent device id
+        :param parent_device_id: The parent device ID
+        :return: Devices object
+        """
+
+    def get_child_device_with_proxy_address(proxy_address):
+        """
+        Get a child device based on its proxy address. Proxy address is
+        defined as {parent id, channel_id}
+        :param proxy_address: A Device.ProxyAddress object
+        :return: Device object or None
+        """
+
+    def device_state_update(device_id,
+                            oper_status=None,
+                            connect_status=None):
+        """
+        Update a device state.
+        :param device_id: The device ID
+        :param oper_status: Operational state of the device
+        :param connect_status: Connection state of the device
+        :return: None
+        """
+
+
+    def child_device_detected(parent_device_id,
+                              parent_port_no,
+                              child_device_type,
+                              channel_id,
+                              **kw):
+        """
+        A child device has been detected.  Core will create the device along
+        with its unique ID.
+        :param parent_device_id: The parent device ID
+        :param parent_port_no: The parent port number
+        :param child_device_type: The child device type
+        :param channel_id: A unique identifier for that child device within
+        the parent device (e.g. vlan_id)
+        :param kw: A dictionary of key-value pairs where each value is a
+        protobuf message
+        :return: None
+        """
+
+    def device_update(device):
+        """
+        Event corresponding to a device update.
+        :param device: Device Object
+        :return: None
+        """
+
+    def child_device_removed(parent_device_id, child_device_id):
+        """
+        Event indicating a child device has been removed from a parent.
+        :param parent_device_id: Device ID of the parent
+        :param child_device_id: Device ID of the child
+        :return: None
+        """
+
+    def child_devices_state_update(parent_device_id,
+                                   oper_status=None,
+                                   connect_status=None,
+                                   admin_status=None):
+        """
+        Event indicating that the status of all child devices has changed.
+        :param parent_device_id: Device ID of the parent
+        :param oper_status: Operational status
+        :param connect_status: Connection status
+        :param admin_status: Admin status
+        :return: None
+        """
+
+    def child_devices_removed(parent_device_id):
+        """
+        Event indicating all child devices have been removed from a parent.
+        :param parent_device_id: Device ID of the parent device
+        :return: None
+        """
+
+    def device_pm_config_update(device_pm_config, init=False):
+        """
+        Event corresponding to a PM config update of a device.
+        :param device_pm_config: a PmConfigs object
+        :param init: True indicates initializing stage
+        :return: None
+        """
+
+    def port_created(device_id, port):
+        """
+        A port has been created and needs to be added to a device.
+        :param device_id: a device ID
+        :param port: Port object
+        :return: None
+        """
+
+    def port_removed(device_id, port):
+        """
+        A port has been removed and it needs to be removed from a Device.
+        :param device_id: a device ID
+        :param port: a Port object
+        :return: None
+        """
+
+    def ports_enabled(device_id):
+        """
+        All ports on that device have been re-enabled. The Core will change
+        the admin state to ENABLED and operational state to ACTIVE for all
+        ports on that device.
+        :param device_id: a device ID
+        :return: None
+        """
+
+    def ports_disabled(device_id):
+        """
+        All ports on that device have been disabled. The Core will change the
+        admin state to DISABLED and operational state to UNKNOWN for all
+        ports on that device.
+        :param device_id: a device ID
+        :return: None
+        """
+
+    def ports_oper_status_update(device_id, oper_status):
+        """
+        The operational status of all ports of a Device has been changed.
+        The Core will update the operational status for all ports on the
+        device.
+        :param device_id: a device ID
+        :param oper_status: operational Status
+        :return: None
+        """
+
+    def image_download_update(img_dnld):
+        """
+        Event corresponding to an image download update.
+        :param img_dnld: an ImageDownload object
+        :return: None
+        """
+
+    def image_download_deleted(img_dnld):
+        """
+        Event corresponding to the deletion of a downloaded image.
+        References to this image need to be removed from the Core.
+        :param img_dnld: an ImageDownload object
+        :return: None
+        """
+
+    def packet_in(device_id, egress_port_no, packet):
+        """
+        Sends a packet to the SDN controller via the Voltha Core.
+        :param device_id: The OLT device ID
+        :param egress_port_no: The port number representing the ONU (cvid)
+        :param packet: The actual packet
+        :return: None
+        """
+
+    # def add_device(device):
+    #     # TODO add doc
+    #     """"""
+
+    # def update_device(device):
+    #     # TODO add doc
+    #     """"""
+
+    # def add_port(device_id, port):
+    #     # TODO add doc
+    #     """"""
+
+    # def create_logical_device(logical_device):
+    #     # TODO add doc
+    #     """"""
+    #
+    # def add_logical_port(logical_device_id, port):
+    #     # TODO add doc
+    #     """"""
+
+    # def child_device_detected(parent_device_id,
+    #                           parent_port_no,
+    #                           child_device_type,
+    #                           proxy_address,
+    #                           admin_state,
+    #                           **kw):
+    #     # TODO add doc
+    #     """"""
+
+    # def send_proxied_message(proxy_address, msg):
+    #     """
+    #     Forward a msg to a child device of device, addressed by the given
+    #     proxy_address=Device.ProxyAddress().
+    #     :param proxy_address: Address info for the parent device
+    #      to route the message to the child device. This was given to the
+    #      child device by the parent device at the creation of the child
+    #      device.
+    #     :param msg: (str) The actual message to send.
+    #     :return: (Deferred(None) or None) The return of this method should
+    #      indicate that the message was successfully *sent*.
+    #     """
+    #
+    # def receive_proxied_message(proxy_address, msg):
+    #     """
+    #     Pass an async message (arrived via a proxy) to this device.
+    #     :param proxy_address: Address info for the parent device
+    #      to route the message to the child device. This was given to the
+    #      child device by the parent device at the creation of the child
+    #      device. Note this is the proxy_address with which the adapter
+    #      had to register prior to receiving proxied messages.
+    #     :param msg: (str) The actual message received.
+    #     :return: None
+    #     """
+    #
+    # def register_for_proxied_messages(proxy_address):
+    #     """
+    #     A child device adapter can use this to indicate its intent to
+    #     receive async messages sent via a parent device. Example: an
+    #     ONU adapter can use this to register for OMCI messages received
+    #     via the OLT and the OLT adapter.
+    #     :param child_device_address: Address info that was given to the
+    #      child device by the parent device at the creation of the child
+    #      device. Its uniqueness acts as a router information for the
+    #      registration.
+    #     :return: None
+    #     """
+    #
+    # def unregister_for_proxied_messages(proxy_address):
+    #     """
+    #     Cancel a previous registration
+    #     :return:
+    #     """
+    #
+    # def submit_kpis(kpi_event_msg):
+    #     """
+    #     Submit KPI metrics on behalf of the OLT and its adapter. This can
+    #     include hardware related metrics, usage and utilization metrics, as
+    #     well as optional adapter specific metrics.
+    #     :param kpi_event_msg: A protobuf message of KpiEvent type.
+    #     :return: None
+    #     """
+    #
+    # def submit_alarm(device_id, alarm_event_msg):
+    #     """
+    #     Submit an alarm on behalf of the OLT and its adapter.
+    #     :param alarm_event_msg: A protobuf message of AlarmEvent type.
+    #     :return: None
+    #     """
+
+    # def register_for_onu_detect_state(proxy_address):
+    #     """
+    #
+    #     :return: None
+    #     """
+    #
+    # def unregister_for_onu_detect_state(proxy_address):
+    #     """
+    #
+    #     :return: None
+    #     """
+    #
+    # def forward_onu_detect_state(proxy_address, state):
+    #     """
+    #     Forward onu detect state to ONU adapter
+    #     :param proxy_address: ONU device address
+    #     :param state: ONU detect state (bool)
+    #     :return: None
+    #     """
+    #
+    # def send_packet_in(logical_device_id, logical_port_no, packet):
+    #     """
+    #     Forward given packet to the northbound toward an SDN controller.
+    #     :param device_id: logical device identifier
+    #     :param logical_port_no: logical port_no (as numbered in openflow)
+    #     :param packet: the actual packet; can be a serialized string or a
+    #     scapy Packet.
+    #     :return: None returned on success
+    #     """
\ No newline at end of file
diff --git a/adapters/kafka/__init__.py b/adapters/kafka/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/adapters/kafka/__init__.py
diff --git a/adapters/kafka/adapter_request_facade.py b/adapters/kafka/adapter_request_facade.py
new file mode 100644
index 0000000..74ed934
--- /dev/null
+++ b/adapters/kafka/adapter_request_facade.py
@@ -0,0 +1,168 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Agent acting as a gateway between the Voltha Core and an individual adapter.
+"""
+from uuid import uuid4
+
+import arrow
+import structlog
+from google.protobuf.json_format import MessageToJson
+from scapy.packet import Packet
+from twisted.internet.defer import inlineCallbacks, returnValue
+from zope.interface import implementer
+
+from adapters.common.event_bus import EventBusClient
+from adapters.common.frameio.frameio import hexify
+from adapters.common.utils.id_generation import create_cluster_logical_device_ids
+from adapters.interface import IAdapterInterface
+from adapters.protos import third_party
+from adapters.protos.device_pb2 import Device, Port, PmConfigs
+from adapters.protos.events_pb2 import AlarmEvent, AlarmEventType, \
+    AlarmEventSeverity, AlarmEventState, AlarmEventCategory
+from adapters.protos.events_pb2 import KpiEvent
+from adapters.protos.voltha_pb2 import DeviceGroup, LogicalDevice, \
+    LogicalPort, AdminState, OperStatus, AlarmFilterRuleKey
+from adapters.common.utils.registry import registry
+from adapters.common.utils.id_generation import create_cluster_device_id
+from adapters.protos.core_adapter_pb2 import IntType
+import re
+
+log = structlog.get_logger()
+
+class MacAddressError(Exception):
+    def __init__(self, error):
+        self.error = error
+
+
+class IDError(Exception):
+    def __init__(self, error):
+        self.error = error
+
+
+@implementer(IAdapterInterface)
+class AdapterRequestFacade(object):
+    """
+    Gate-keeper between CORE and device adapters.
+
+    On one side it interacts with Core's internal model and update/dispatch
+    mechanisms.
+
+    On the other side, it interacts with the adapter's standard interface,
+    as defined in IAdapterInterface (adapters/interface.py).
+    """
+
+    def __init__(self, adapter):
+        self.adapter = adapter
+
+    def start(self):
+        log.debug('starting')
+
+    def stop(self):
+        log.debug('stopping')
+
+    def adopt_device(self, device):
+        d = Device()
+        if device:
+            device.Unpack(d)
+            return (True, self.adapter.adopt_device(d))
+        else:
+            return (False, d)
+
+    def get_ofp_device_info(self, device):
+        d = Device()
+        if device:
+            device.Unpack(d)
+            return (True, self.adapter.get_ofp_device_info(d))
+        else:
+            return (False, d)
+
+    def get_ofp_port_info(self, device, port_no):
+        d = Device()
+        if device:
+            device.Unpack(d)
+        else:
+            return (False, d)
+
+        p = IntType()
+        port_no.Unpack(p)
+
+        return (True, self.adapter.get_ofp_port_info(d, p.val))
+
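+    # The Core packs each request argument as a google.protobuf.Any; the
+    # Unpack() calls above recover the concrete messages. A hedged sketch of
+    # both sides of that round trip (variable names are illustrative):
+    #
+    #     from google.protobuf.any_pb2 import Any
+    #     packed = Any()
+    #     packed.Pack(Device(id='olt-1'))   # sending side
+    #     d = Device()
+    #     packed.Unpack(d)                  # receiving side, as done above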
+
+    def reconcile_device(self, device):
+        return self.adapter.reconcile_device(device)
+
+    def abandon_device(self, device):
+        return self.adapter.abandon_device(device)
+
+    def disable_device(self, device):
+        return self.adapter.disable_device(device)
+
+    def reenable_device(self, device):
+        return self.adapter.reenable_device(device)
+
+    def reboot_device(self, device):
+        d = Device()
+        if device:
+            device.Unpack(d)
+            return (True, self.adapter.reboot_device(d))
+        else:
+            return (False, d)
+
+    def download_image(self, device, request):
+        return self.adapter.download_image(device, request)
+
+    def get_image_download_status(self, device, request):
+        return self.adapter.get_image_download_status(device, request)
+
+    def cancel_image_download(self, device, request):
+        return self.adapter.cancel_image_download(device, request)
+
+    def activate_image_update(self, device, request):
+        return self.adapter.activate_image_update(device, request)
+
+    def revert_image_update(self, device, request):
+        return self.adapter.revert_image_update(device, request)
+
+    def self_test(self, device):
+        return self.adapter.self_test_device(device)
+
+    def delete_device(self, device):
+        # Child-device cleanup is expected to be handled as part of the
+        # adapter's delete_device processing.
+        return self.adapter.delete_device(device)
+
+    def get_device_details(self, device):
+        return self.adapter.get_device_details(device)
+
+    def update_flows_bulk(self, device, flows, groups):
+        return self.adapter.update_flows_bulk(device, flows, groups)
+
+    def update_flows_incrementally(self, device, flow_changes, group_changes):
+        return self.adapter.update_flows_incrementally(device, flow_changes, group_changes)
+
+    def suppress_alarm(self, filter):
+        return self.adapter.suppress_alarm(filter)
+
+    def unsuppress_alarm(self, filter):
+        return self.adapter.unsuppress_alarm(filter)
+
diff --git a/adapters/kafka/core_proxy.py b/adapters/kafka/core_proxy.py
new file mode 100644
index 0000000..bcc4239
--- /dev/null
+++ b/adapters/kafka/core_proxy.py
@@ -0,0 +1,331 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Proxy used by an adapter to initiate requests towards the Voltha Core.
+"""
+from uuid import uuid4
+
+import arrow
+import structlog
+from google.protobuf.json_format import MessageToJson
+from scapy.packet import Packet
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python import failure
+from zope.interface import implementer
+
+from adapters.common.event_bus import EventBusClient
+from adapters.common.frameio.frameio import hexify
+from adapters.common.utils.id_generation import create_cluster_logical_device_ids
+from adapters.interface import IAdapterInterface
+from adapters.protos import third_party
+from adapters.protos.device_pb2 import Device, Port, PmConfigs
+from adapters.protos.events_pb2 import AlarmEvent, AlarmEventType, \
+    AlarmEventSeverity, AlarmEventState, AlarmEventCategory
+from adapters.protos.events_pb2 import KpiEvent
+from adapters.protos.voltha_pb2 import DeviceGroup, LogicalDevice, \
+    LogicalPort, AdminState, OperStatus, AlarmFilterRuleKey, CoreInstance
+from adapters.common.utils.registry import registry, IComponent
+from adapters.common.utils.id_generation import create_cluster_device_id
+import re
+from adapters.interface import ICoreSouthBoundInterface
+from adapters.protos.core_adapter_pb2 import StrType, BoolType, IntType
+from adapters.protos.common_pb2 import ID
+from google.protobuf.message import Message
+from adapters.common.utils.deferred_utils import DeferredWithTimeout, TimeOutError
+
+log = structlog.get_logger()
+
+class KafkaMessagingError(Exception):
+    def __init__(self, error):
+        self.error = error
+
+def wrap_request(return_cls):
+    def real_wrapper(func):
+        @inlineCallbacks
+        def wrapper(*args, **kw):
+            try:
+                (success, d) = yield func(*args, **kw)
+                if success:
+                    log.debug("successful-response", func=func, val=d)
+                    if return_cls is not None:
+                        rc = return_cls()
+                        if d is not None:
+                            d.Unpack(rc)
+                        returnValue(rc)
+                    else:
+                        log.debug("successful-response-none", func=func,
+                                  val=None)
+                        returnValue(None)
+                else:
+                    log.warn("unsuccessful-request", func=func, args=args, kw=kw)
+                    returnValue(d)
+            except Exception as e:
+                log.exception("request-wrapper-exception", func=func, e=e)
+                raise
+        return wrapper
+    return real_wrapper
+
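+# Contract assumed by wrap_request: the wrapped coroutine yields a
+# (success, payload) pair where payload is a google.protobuf.Any. On
+# success the payload is unpacked into a fresh return_cls instance (or
+# None when return_cls is None); on failure the raw payload is returned
+# unchanged so the caller can inspect it.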
+
+@implementer(IComponent, ICoreSouthBoundInterface)
+class CoreProxy(object):
+
+    def __init__(self, kafka_proxy, core_topic, my_listening_topic):
+        self.kafka_proxy = kafka_proxy
+        self.listening_topic = my_listening_topic
+        self.core_topic = core_topic
+        self.default_timeout = 3
+
+    def start(self):
+        log.info('started')
+
+        return self
+
+    def stop(self):
+        log.info('stopped')
+
+    @inlineCallbacks
+    def invoke(self, rpc, to_topic=None, **kwargs):
+        @inlineCallbacks
+        def _send_request(rpc, m_callback, to_topic, **kwargs):
+            try:
+                log.debug("sending-request", rpc=rpc)
+                if to_topic is None:
+                    to_topic = self.core_topic
+                result = yield self.kafka_proxy.send_request(rpc=rpc,
+                                                             to_topic=to_topic,
+                                                             reply_topic=self.listening_topic,
+                                                             callback=None,
+                                                             **kwargs)
+                if not m_callback.called:
+                    m_callback.callback(result)
+                else:
+                    log.debug('timeout-already-occurred', rpc=rpc)
+            except Exception as e:
+                log.exception("Failure-sending-request", rpc=rpc, kw=kwargs)
+                if not m_callback.called:
+                    m_callback.errback(failure.Failure())
+
+        log.debug('invoke-request', rpc=rpc)
+        cb = DeferredWithTimeout(timeout=self.default_timeout)
+        _send_request(rpc, cb, to_topic, **kwargs)
+        try:
+            res = yield cb
+            returnValue(res)
+        except TimeOutError as e:
+            log.warn('invoke-timeout', e=e)
+            raise e
+
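+    # Request/response flow of invoke(), in brief: the request is published
+    # on core_topic (or to_topic) with reply_topic=self.listening_topic, and
+    # the caller then waits up to default_timeout seconds for the correlated
+    # response before TimeOutError fires. Illustrative call:
+    #
+    #     res = yield self.invoke(rpc="GetDevice", device_id=id_proto)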
+
+    @wrap_request(CoreInstance)
+    @inlineCallbacks
+    def register(self, adapter):
+        log.debug("register")
+        try:
+            res = yield self.invoke(rpc="Register", adapter=adapter)
+            log.info("registration-returned", res=res)
+            returnValue(res)
+        except Exception as e:
+            log.exception("registration-exception", e=e)
+            raise
+
+    @wrap_request(Device)
+    @inlineCallbacks
+    def get_device(self, device_id):
+        log.debug("get-device")
+        id = ID()
+        id.id = device_id
+        res = yield self.invoke(rpc="GetDevice", device_id=id)
+        returnValue(res)
+
+    def get_child_device(self, parent_device_id, **kwargs):
+        raise NotImplementedError()
+
+    # def add_device(self, device):
+    #     raise NotImplementedError()
+
+    def get_ports(self, device_id, port_type):
+        raise NotImplementedError()
+
+    def get_child_devices(self, parent_device_id):
+        raise NotImplementedError()
+
+    def get_child_device_with_proxy_address(self, proxy_address):
+        raise NotImplementedError()
+
+    def _to_proto(self, **kwargs):
+        encoded = {}
+        for k, v in kwargs.iteritems():
+            if isinstance(v, Message):
+                encoded[k] = v
+            elif type(v) == int:
+                i_proto = IntType()
+                i_proto.val = v
+                encoded[k] = i_proto
+            elif type(v) == str:
+                s_proto = StrType()
+                s_proto.val = v
+                encoded[k] = s_proto
+            elif type(v) == bool:
+                b_proto = BoolType()
+                b_proto.val = v
+                encoded[k] = b_proto
+        return encoded
+
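+    # Example (keyword names are illustrative): _to_proto(vlan=101,
+    # serial='ABC', active=True) returns {'vlan': IntType(val=101),
+    # 'serial': StrType(val='ABC'), 'active': BoolType(val=True)}.
+    # Values that are already protobuf Messages pass through unchanged.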
+
+    @wrap_request(None)
+    @inlineCallbacks
+    def child_device_detected(self,
+                              parent_device_id,
+                              parent_port_no,
+                              child_device_type,
+                              channel_id,
+                              **kw):
+        id = ID()
+        id.id = parent_device_id
+        ppn = IntType()
+        ppn.val = parent_port_no
+        cdt = StrType()
+        cdt.val = child_device_type
+        channel = IntType()
+        channel.val = channel_id
+
+        args = self._to_proto(**kw)
+        res = yield self.invoke(rpc="ChildDeviceDetected",
+                                parent_device_id=id,
+                                parent_port_no = ppn,
+                                child_device_type= cdt,
+                                channel_id=channel,
+                                **args)
+        returnValue(res)
+
+
+    @wrap_request(None)
+    @inlineCallbacks
+    def device_update(self, device):
+        log.debug("device_update")
+        res = yield self.invoke(rpc="DeviceUpdate", device=device)
+        returnValue(res)
+
+    def child_device_removed(self, parent_device_id, child_device_id):
+        raise NotImplementedError()
+
+
+    @wrap_request(None)
+    @inlineCallbacks
+    def device_state_update(self, device_id,
+                                   oper_status=None,
+                                   connect_status=None):
+
+        id = ID()
+        id.id = device_id
+        o_status = IntType()
+        if oper_status:
+            o_status.val = oper_status
+        else:
+            o_status.val = -1
+        c_status = IntType()
+        if connect_status:
+            c_status.val = connect_status
+        else:
+            c_status.val = -1
+
+        res = yield self.invoke(rpc="DeviceStateUpdate",
+                                device_id=id,
+                                oper_status=o_status,
+                                connect_status=c_status)
+        returnValue(res)
+
+    @wrap_request(None)
+    @inlineCallbacks
+    def child_devices_state_update(self, parent_device_id,
+                                   oper_status=None,
+                                   connect_status=None,
+                                   admin_state=None):
+
+        id = ID()
+        id.id = parent_device_id
+        o_status = IntType()
+        if oper_status:
+            o_status.val = oper_status
+        else:
+            o_status.val = -1
+        c_status = IntType()
+        if connect_status:
+            c_status.val = connect_status
+        else:
+            c_status.val = -1
+        a_status = IntType()
+        if admin_state:
+            a_status.val = admin_state
+        else:
+            a_status.val = -1
+
+        res = yield self.invoke(rpc="child_devices_state_update",
+                                parent_device_id=id,
+                                oper_status=o_status,
+                                connect_status=c_status,
+                                admin_state=a_status)
+        returnValue(res)
+
+
+    def child_devices_removed(self, parent_device_id):
+        raise NotImplementedError()
+
+
+    @wrap_request(None)
+    @inlineCallbacks
+    def device_pm_config_update(self, device_pm_config, init=False):
+        log.debug("device_pm_config_update")
+        b = BoolType()
+        b.val = init
+        res = yield self.invoke(rpc="DevicePMConfigUpdate",
+                                device_pm_config=device_pm_config, init=b)
+        returnValue(res)
+
+    @wrap_request(None)
+    @inlineCallbacks
+    def port_created(self, device_id, port):
+        log.debug("port_created")
+        proto_id = ID()
+        proto_id.id = device_id
+        res = yield self.invoke(rpc="PortCreated", device_id=proto_id, port=port)
+        returnValue(res)
+
+
+    def port_removed(self, device_id, port):
+        raise NotImplementedError()
+
+    def ports_enabled(self, device_id):
+        raise NotImplementedError()
+
+    def ports_disabled(self, device_id):
+        raise NotImplementedError()
+
+    def ports_oper_status_update(self, device_id, oper_status):
+        raise NotImplementedError()
+
+    def image_download_update(self, img_dnld):
+        raise NotImplementedError()
+
+    def image_download_deleted(self, img_dnld):
+        raise NotImplementedError()
+
+    def packet_in(self, device_id, egress_port_no, packet):
+        raise NotImplementedError()
diff --git a/adapters/kafka/event_bus_publisher.py b/adapters/kafka/event_bus_publisher.py
new file mode 100644
index 0000000..011fdea
--- /dev/null
+++ b/adapters/kafka/event_bus_publisher.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A gateway between the internal event bus and the Kafka publisher proxy
+to publish select topics and messages posted to the Voltha-internal event
+bus toward the external world.
+"""
+import structlog
+from google.protobuf.json_format import MessageToDict
+from google.protobuf.message import Message
+from simplejson import dumps
+
+from adapters.common.event_bus import EventBusClient
+
+log = structlog.get_logger()
+
+
+class EventBusPublisher(object):
+
+    def __init__(self, kafka_proxy, config):
+        self.kafka_proxy = kafka_proxy
+        self.config = config
+        self.topic_mappings = config.get('topic_mappings', {})
+        self.event_bus = EventBusClient()
+        self.subscriptions = None
+
+    def start(self):
+        log.debug('starting')
+        self.subscriptions = list()
+        self._setup_subscriptions(self.topic_mappings)
+        log.info('started')
+        return self
+
+    def stop(self):
+        try:
+            log.debug('stopping-event-bus')
+            if self.subscriptions:
+                for subscription in self.subscriptions:
+                    self.event_bus.unsubscribe(subscription)
+            log.info('stopped-event-bus')
+        except Exception as e:
+            log.exception('failed-stopping-event-bus', e=e)
+            return
+
+    def _setup_subscriptions(self, mappings):
+
+        for event_bus_topic, mapping in mappings.iteritems():
+
+            kafka_topic = mapping.get('kafka_topic', None)
+
+            if kafka_topic is None:
+                log.error('no-kafka-topic-in-config',
+                          event_bus_topic=event_bus_topic,
+                          mapping=mapping)
+                continue
+
+            self.subscriptions.append(self.event_bus.subscribe(
+                event_bus_topic,
+                # to avoid Python late-binding to the last registered
+                # kafka_topic, we force instant binding with the default arg
+                lambda _, m, k=kafka_topic: self.forward(k, m)))
+
+            log.info('event-to-kafka', kafka_topic=kafka_topic,
+                     event_bus_topic=event_bus_topic)
+
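+    # Why the k=kafka_topic default argument matters: without it, every
+    # lambda would close over the loop variable and forward to the last
+    # topic in the mapping. A minimal reproduction of the pitfall:
+    #
+    #     fns = [lambda: t for t in ('a', 'b')]       # both return 'b'
+    #     fns = [lambda t=t: t for t in ('a', 'b')]   # return 'a' and 'b'
+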
+    def forward(self, kafka_topic, msg):
+        try:
+            # convert to JSON string if msg is a protobuf msg
+            if isinstance(msg, Message):
+                msg = dumps(MessageToDict(msg, True, True))
+            log.debug('forward-event-bus-publisher')
+            self.kafka_proxy.send_message(kafka_topic, msg)
+        except Exception as e:
+            log.exception('failed-forward-event-bus-publisher', e=e)
+
diff --git a/adapters/kafka/kafka_inter_container_library.py b/adapters/kafka/kafka_inter_container_library.py
new file mode 100644
index 0000000..ad53812
--- /dev/null
+++ b/adapters/kafka/kafka_inter_container_library.py
@@ -0,0 +1,584 @@
+#!/usr/bin/env python
+
+from zope.interface import Interface, implementer
+from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue, Deferred, \
+    DeferredQueue, gatherResults
+from afkak.client import KafkaClient
+from afkak.consumer import OFFSET_LATEST, Consumer
+import structlog
+from adapters.common.utils import asleep
+from adapters.protos.core_adapter_pb2 import MessageType, Argument, \
+    InterContainerRequestBody, InterContainerMessage, Header, InterContainerResponseBody
+import time
+from uuid import uuid4
+from adapters.common.utils.registry import IComponent
+
+
+log = structlog.get_logger()
+
+class KafkaMessagingError(Exception):
+    def __init__(self, error):
+        self.error = error
+
+@implementer(IComponent)
+class IKafkaMessagingProxy(object):
+    _kafka_messaging_instance = None
+
+    def __init__(self,
+                 kafka_host_port,
+                 kv_store,
+                 default_topic,
+                 target_cls):
+        """
+        Initialize the kafka proxy.  This is a singleton (may change to
+        non-singleton if performance is better)
+        :param kafka_host_port: Kafka host and port
+        :param kv_store: Key-Value store
+        :param default_topic: Default topic to subscribe to
+        :param target_cls: target class - method of that class is invoked
+        when a message is received on the default_topic
+        """
+        # raise an exception if the object already exists
+        if IKafkaMessagingProxy._kafka_messaging_instance:
+            raise Exception(
+                'Singleton-exist', IKafkaMessagingProxy)
+
+        log.debug("Initializing-KafkaProxy")
+        self.kafka_host_port = kafka_host_port
+        self.kv_store = kv_store
+        self.default_topic = default_topic
+        self.target_cls = target_cls
+        self.topic_target_cls_map = {}
+        self.topic_consumer_map = {}
+        self.topic_callback_map = {}
+        self.subscribers = {}
+        self.kafka_client = None
+        self.kafka_proxy = None
+        self.transaction_id_deferred_map = {}
+        self.received_msg_queue = DeferredQueue()
+
+        self.init_time = 0
+        self.init_received_time = 0
+
+        self.init_resp_time = 0
+        self.init_received_resp_time = 0
+
+        self.num_messages = 0
+        self.total_time = 0
+        self.num_responses = 0
+        self.total_time_responses = 0
+        self.retries = 0
+        log.debug("KafkaProxy-initialized")
+
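+    # Illustrative construction of the proxy (endpoint and topic names are
+    # hypothetical; target_cls would typically be an AdapterRequestFacade):
+    #
+    #     proxy = IKafkaMessagingProxy(
+    #         kafka_host_port='kafka:9092',
+    #         kv_store=None,
+    #         default_topic='ponsim_olt',
+    #         target_cls=request_facade)
+    #     proxy.start()
+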
+    def start(self):
+        try:
+            # Create the kafka client
+            # assert self.kafka_host is not None
+            # assert self.kafka_port is not None
+            # kafka_host_port = ":".join((self.kafka_host, self.kafka_port))
+            self.kafka_client = KafkaClient(self.kafka_host_port)
+
+            # Get the kafka proxy instance.  If it does not exist then
+            # create it
+            self.kafka_proxy = get_kafka_proxy()
+            if self.kafka_proxy is None:
+                KafkaProxy(kafka_endpoint=self.kafka_host_port).start()
+                self.kafka_proxy = get_kafka_proxy()
+
+            # Subscribe the default topic and target_cls
+            self.topic_target_cls_map[self.default_topic] = self.target_cls
+
+            # Start the queue to handle incoming messages
+            reactor.callLater(0, self._received_message_processing_loop)
+
+            # Start listening for incoming messages
+            reactor.callLater(0, self.subscribe, self.default_topic,
+                              target_cls=self.target_cls)
+
+            # Setup the singleton instance
+            IKafkaMessagingProxy._kafka_messaging_instance = self
+        except Exception as e:
+            log.exception("Failed-to-start-proxy", e=e)
+
+
+    def stop(self):
+        """
+        Invoked to stop the kafka proxy
+        :return: None on success, Exception on failure
+        """
+        log.debug("Stopping-messaging-proxy ...")
+        try:
+            # Stop all the consumers
+            deferred_list = []
+            for key, values in self.topic_consumer_map.iteritems():
+                deferred_list.extend([c.stop() for c in values])
+
+            if deferred_list:
+                d = gatherResults(deferred_list)
+                d.addCallback(lambda result: self.kafka_client.close())
+            else:
+                self.kafka_client.close()
+            log.debug("Messaging-proxy-stopped.")
+        except Exception as e:
+            log.exception("Exception-when-stopping-messaging-proxy:", e=e)
+
+
+    @inlineCallbacks
+    def _wait_until_topic_is_ready(self, client, topic):
+        e = True
+        while e:
+            yield client.load_metadata_for_topics(topic)
+            e = client.metadata_error_for_topic(topic)
+            if e:
+                log.debug("Topic-not-ready-retrying...", topic=topic)
+
+    def _clear_backoff(self):
+        if self.retries:
+            log.info('reconnected-to-kafka', after_retries=self.retries)
+            self.retries = 0
+
+    @inlineCallbacks
+    def _subscribe(self, topic, callback=None, target_cls=None):
+        try:
+            yield self._wait_until_topic_is_ready(self.kafka_client, topic)
+            partitions = self.kafka_client.topic_partitions[topic]
+            consumers = []
+
+            # First setup the generic callback - all received messages will
+            # go through that queue
+            if topic not in self.topic_consumer_map:
+                consumers = [Consumer(self.kafka_client, topic, partition,
+                                      self._enqueue_received_message)
+                             for partition in partitions]
+                self.topic_consumer_map[topic] = consumers
+
+            log.debug("_subscribe", topic=topic, consumermap=self.topic_consumer_map)
+
+            if target_cls is not None and callback is None:
+                # Scenario #1
+                if topic not in self.topic_target_cls_map:
+                    self.topic_target_cls_map[topic] = target_cls
+            elif target_cls is None and callback is not None:
+                # Scenario #2
+                log.debug("custom-callback", topic=topic,
+                          callback_map=self.topic_callback_map)
+                if topic not in self.topic_callback_map:
+                    self.topic_callback_map[topic] = [callback]
+                else:
+                    self.topic_callback_map[topic].extend([callback])
+            else:
+                log.warn("invalid-parameters")
+
+            def cb_closed(result):
+                """
+                Called when a consumer cleanly stops.
+                """
+                log.debug("Consumers-cleanly-stopped")
+
+            def eb_failed(failure):
+                """
+                Called when a consumer fails due to an uncaught exception in the
+                processing callback or a network error on shutdown. In this case we
+                simply log the error.
+                """
+                log.warn("Consumers-failed", failure=failure)
+
+            for c in consumers:
+                c.start(OFFSET_LATEST).addCallbacks(cb_closed, eb_failed)
+
+            returnValue(True)
+        except Exception as e:
+            log.exception("Exception-during-subscription", e=e)
+            returnValue(False)
+
+    @inlineCallbacks
+    def subscribe(self, topic, callback=None, target_cls=None,
+                  max_retry=3):
+        """
+        Scenario 1:  invoked to subscribe to a specific topic with a
+        target_cls to invoke when a message is received on that topic.  This
+        handles the case of request/response where this library performs the
+        heavy lifting. In this case the callback must be None.
+
+        Scenario 2:  invoked to subscribe to a specific topic with a
+        specific callback to invoke when a message is received on that topic.
+        This handles the case where the caller wants to process the message
+        received itself. In this case the target_cls must be None.
+
+        :param topic: topic to subscribe to
+        :param callback: Callback to invoke when a message is received on
+        the topic. Exactly one of callback and target_cls must be provided;
+        the other must be None.
+        :param target_cls:  Target class to use when a message is
+        received on the topic. There can only be 1 target_cls per topic.
+        :param max_retry:  the number of retries before reporting failure
+        to subscribe.  This caters for the scenario where the kafka topic
+        is not yet ready.
+        :return: True on success, False on failure
+        """
+        RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]
+
+        def _backoff(msg, retries):
+            wait_time = RETRY_BACKOFF[min(retries,
+                                          len(RETRY_BACKOFF) - 1)]
+            log.info(msg, retry_in=wait_time)
+            return asleep(wait_time)
+
+        retry = 0
+        # _subscribe is an inlineCallbacks coroutine, so its result must be
+        # yielded; testing the returned Deferred itself would always be
+        # truthy and the retry loop would never run.
+        while not (yield self._subscribe(topic, callback=callback,
+                                         target_cls=target_cls)):
+            if retry > max_retry:
+                returnValue(False)
+            else:
+                yield _backoff("subscription-not-complete", retry)
+                retry += 1
+        returnValue(True)
+
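+    # With the RETRY_BACKOFF schedule above and max_retry=3, a failing
+    # subscription is retried after 0.05s, 0.1s, 0.2s and 0.5s before
+    # subscribe() gives up and returns False.
+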
+    def unsubscribe(self, topic):
+        """
+        Invoked to unsubscribe from a topic
+        :param topic: topic to unsubscribe from
+        :return: None on success or Exception on failure
+        """
+        log.debug("Unsubscribing-from-topic", topic=topic)
+
+        # addCallback passes the gatherResults result as the first argument
+        def remove_topic(result, topic):
+            if topic in self.topic_consumer_map:
+                del self.topic_consumer_map[topic]
+
+        try:
+            if topic in self.topic_consumer_map:
+                consumers = self.topic_consumer_map[topic]
+                d = gatherResults([c.stop() for c in consumers])
+                d.addCallback(remove_topic, topic)
+                log.debug("Unsubscribed-from-topic.", topic=topic)
+            else:
+                log.debug("Topic-does-not-exist.", topic=topic)
+        except Exception as e:
+            log.exception("Exception-when-stopping-messaging-proxy:", e=e)
+
+    @inlineCallbacks
+    def _enqueue_received_message(self, reactor, message_list):
+        """
+        Internal method to continuously queue all received messages,
+        irrespective of topic
+        :param reactor: A requirement by the Twisted Python kafka library
+        :param message_list: Received list of messages
+        :return: None on success, Exception on failure
+        """
+        try:
+            for m in message_list:
+                log.debug("received-msg", msg=m)
+                yield self.received_msg_queue.put(m)
+        except Exception as e:
+            log.exception("Failed-enqueueing-received-message", e=e)
+
+    @inlineCallbacks
+    def _received_message_processing_loop(self):
+        """
+        Internal method to continuously process all received messages one
+        at a time
+        :return: None on success, Exception on failure
+        """
+        while True:
+            try:
+                message = yield self.received_msg_queue.get()
+                yield self._process_message(message)
+            except Exception as e:
+                log.exception("Failed-dequeueing-received-message", e=e)
+
+    def _to_string(self, unicode_str):
+        if unicode_str is not None:
+            if isinstance(unicode_str, unicode):
+                return unicode_str.encode('ascii', 'ignore')
+            return unicode_str
+        return None
+
+    def _format_request(self,
+                        rpc,
+                        to_topic,
+                        reply_topic,
+                        **kwargs):
+        """
+        Format a request to send over kafka
+        :param rpc: Name of the remote API to invoke
+        :param to_topic: Topic to send the request to
+        :param reply_topic: Topic on which to receive the resulting
+        response, if any
+        :param kwargs: Dictionary of key-value pairs to pass as arguments to
+        the remote rpc API.
+        :return: An InterContainerMessage message type on success or None on
+        failure
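+
+        Illustrative example (the argument names are hypothetical):
+            _format_request(rpc="get_device", to_topic="rwcore",
+                            reply_topic="ponsim_olt", device_id=id_proto)
+        returns a REQUEST-typed InterContainerMessage whose header carries a
+        uuid4 transaction id and whose body packs each keyword argument into
+        an Argument entry.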
+        """
+        try:
+            transaction_id = uuid4().hex
+            request = InterContainerMessage()
+            request_body = InterContainerRequestBody()
+            request.header.id = transaction_id
+            request.header.type = MessageType.Value("REQUEST")
+            request.header.from_topic = self.default_topic
+            request.header.to_topic = to_topic
+
+            response_required = False
+            if reply_topic:
+                request_body.reply_to_topic = reply_topic
+                response_required = True
+
+            request.header.timestamp = int(round(time.time() * 1000))
+            request_body.rpc = rpc
+            for a, b in kwargs.iteritems():
+                arg = Argument()
+                arg.key = a
+                try:
+                    arg.value.Pack(b)
+                    request_body.args.extend([arg])
+                except Exception as e:
+                    log.exception("Failed-parsing-value", e=e)
+            request_body.response_required = response_required
+            request.body.Pack(request_body)
+            return request, transaction_id, response_required
+        except Exception as e:
+            log.exception("formatting-request-failed",
+                          rpc=rpc,
+                          to_topic=to_topic,
+                          reply_topic=reply_topic,
+                          args=kwargs)
+            return None, None, None
+
+    def _format_response(self, msg_header, msg_body, status):
+        """
+        Format a response
+        :param msg_header: The header portion of a received request
+        :param msg_body: The response body
+        :param status: True if this represents a successful response
+        :return: an InterContainerMessage message type
+        """
+        try:
+            assert isinstance(msg_header, Header)
+            response = InterContainerMessage()
+            response_body = InterContainerResponseBody()
+            response.header.id = msg_header.id
+            response.header.timestamp = int(
+                round(time.time() * 1000))
+            response.header.type = MessageType.Value("RESPONSE")
+            response.header.from_topic = msg_header.to_topic
+            response.header.to_topic = msg_header.from_topic
+            if msg_body is not None:
+                response_body.result.Pack(msg_body)
+            response_body.success = status
+            response.body.Pack(response_body)
+            return response
+        except Exception as e:
+            log.exception("formatting-response-failed", header=msg_header,
+                          body=msg_body, status=status, e=e)
+            return None
+
+    def _parse_response(self, msg):
+        try:
+            message = InterContainerMessage()
+            message.ParseFromString(msg)
+            resp = InterContainerResponseBody()
+            if message.body.Is(InterContainerResponseBody.DESCRIPTOR):
+                message.body.Unpack(resp)
+            else:
+                log.debug("unsupported-msg", msg_type=type(message.body))
+                return None
+            log.debug("parsed-response", input=message, output=resp)
+            return resp
+        except Exception as e:
+            log.exception("parsing-response-failed", msg=msg, e=e)
+            return None
+
+    @inlineCallbacks
+    def _process_message(self, m):
+        """
+        Default internal method invoked for every batch of messages received
+        from Kafka.
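+
+        For REQUEST messages, the rpc named in the body is dispatched to the
+        registered target_cls instance.  A compatible handler is expected to
+        return a (status, result) tuple; a minimal sketch (the method and
+        lookup below are hypothetical):
+
+            @inlineCallbacks
+            def get_device(self, device_id=None):
+                device = yield self.root_proxy.lookup(device_id)
+                returnValue((True, device))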
+        """
+        def _toDict(args):
+            """
+            Convert a repeated Argument protobuf type into a python
+            dictionary
+            :param args: Repeated core_adapter.Argument type
+            :return: a python dictionary
+            """
+            if args is None:
+                return None
+            result = {}
+            for arg in args:
+                assert isinstance(arg, Argument)
+                result[arg.key] = arg.value
+            return result
+
+        try:
+            val = m.message.value
+
+            # Go over customized callbacks first
+            if m.topic in self.topic_callback_map:
+                for c in self.topic_callback_map[m.topic]:
+                    yield c(val)
+
+            #  Check whether we need to process request/response scenario
+            if m.topic not in self.topic_target_cls_map:
+                return
+
+            # Process request/response scenario
+            message = InterContainerMessage()
+            message.ParseFromString(val)
+
+            if message.header.type == MessageType.Value("REQUEST"):
+                # Get the target class for that specific topic
+                targeted_topic = self._to_string(message.header.to_topic)
+                msg_body = InterContainerRequestBody()
+                if message.body.Is(InterContainerRequestBody.DESCRIPTOR):
+                    message.body.Unpack(msg_body)
+                else:
+                    log.debug("unsupported-msg", msg_type=type(message.body))
+                    return
+                if targeted_topic in self.topic_target_cls_map:
+                    if msg_body.args:
+                        log.debug("message-body-args-present", body=msg_body)
+                        (status, res) = yield getattr(
+                            self.topic_target_cls_map[targeted_topic],
+                            self._to_string(msg_body.rpc))(
+                            **_toDict(msg_body.args))
+                    else:
+                        log.debug("message-body-args-absent", body=msg_body,
+                                  rpc=msg_body.rpc)
+                        (status, res) = yield getattr(
+                            self.topic_target_cls_map[targeted_topic],
+                            self._to_string(msg_body.rpc))()
+                    if msg_body.response_required:
+                        response = self._format_response(
+                            msg_header=message.header,
+                            msg_body=res,
+                            status=status,
+                        )
+                        if response is not None:
+                            res_topic = self._to_string(
+                                response.header.to_topic)
+                            self._send_kafka_message(res_topic, response)
+                            log.debug("Response-sent", response=response.body)
+            elif message.header.type == MessageType.Value("RESPONSE"):
+                trns_id = self._to_string(message.header.id)
+                if trns_id in self.transaction_id_deferred_map:
+                    resp = self._parse_response(val)
+
+                    self.transaction_id_deferred_map[trns_id].callback(resp)
+            else:
+                log.error("Invalid-message-type", type=message.header.type)
+
+        except Exception as e:
+            log.exception("Failed-to-process-message", message=m, e=e)
+
+    @inlineCallbacks
+    def _send_kafka_message(self, topic, msg):
+        try:
+            yield self.kafka_proxy.send_message(topic, msg.SerializeToString())
+        except Exception as e:
+            log.exception("Failed-sending-message", message=msg, e=e)
+
+    @inlineCallbacks
+    def send_request(self,
+                     rpc,
+                     to_topic,
+                     reply_topic=None,
+                     callback=None,
+                     **kwargs):
+        """
+        Invoked to send a message to a remote container and receive a
+        response if required.
+        :param rpc: The remote API to invoke
+        :param to_topic: Send the message to this kafka topic
+        :param reply_topic: If not None then a response is expected on this
+        topic.  If set to None then no response is required.
+        :param callback: Callback to invoke when a response is received.
+        :param kwargs: Key-value pairs representing arguments to pass to the
+        rpc remote API.
+        :return: If no response is required, nothing is returned.
+        Otherwise the response is either delivered via the callback or
+        returned as a (status, result) tuple.
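+
+        Illustrative usage (the topic and rpc names are hypothetical):
+            (success, result) = yield proxy.send_request(
+                rpc="get_device", to_topic="rwcore",
+                reply_topic="ponsim_olt", device_id=id_proto)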
+        """
+        try:
+            # Ensure all strings are plain str, not unicode
+            rpc = self._to_string(rpc)
+            to_topic = self._to_string(to_topic)
+            reply_topic = self._to_string(reply_topic)
+
+            request, transaction_id, response_required = \
+                self._format_request(
+                    rpc=rpc,
+                    to_topic=to_topic,
+                    reply_topic=reply_topic,
+                    **kwargs)
+
+            if request is None:
+                return
+
+            # Add the transaction to the transaction map before sending the
+            # request.  This will guarantee the eventual response will be
+            # processed.
+            wait_for_result = None
+            if response_required:
+                wait_for_result = Deferred()
+                self.transaction_id_deferred_map[
+                    self._to_string(request.header.id)] = wait_for_result
+
+            log.debug("BEFORE-SENDING", to_topic=to_topic, from_topic=reply_topic)
+            yield self._send_kafka_message(to_topic, request)
+            log.debug("AFTER-SENDING", to_topic=to_topic, from_topic=reply_topic)
+
+            if response_required:
+                res = yield wait_for_result
+
+                # Remove the transaction from the transaction map
+                del self.transaction_id_deferred_map[transaction_id]
+
+                if res is None or not res.success:
+                    raise KafkaMessagingError(
+                        error="Failed-response: {}".format(res))
+
+                log.debug("send-message-response", rpc=rpc, result=res)
+
+                if callback:
+                    callback((res.success, res.result))
+                else:
+                    returnValue((res.success, res.result))
+        except Exception as e:
+            log.exception("Exception-sending-request", e=e)
+            raise KafkaMessagingError(error=e)
+
+
+# Common method to get the singleton instance of the kafka proxy class
+def get_messaging_proxy():
+    return IKafkaMessagingProxy._kafka_messaging_instance
diff --git a/adapters/kafka/kafka_proxy.py b/adapters/kafka/kafka_proxy.py
new file mode 100644
index 0000000..10fdbf8
--- /dev/null
+++ b/adapters/kafka/kafka_proxy.py
@@ -0,0 +1,209 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from afkak.client import KafkaClient as _KafkaClient
+from afkak.common import (
+    PRODUCER_ACK_LOCAL_WRITE, PRODUCER_ACK_NOT_REQUIRED
+)
+from afkak.producer import Producer as _kafkaProducer
+from structlog import get_logger
+from twisted.internet.defer import inlineCallbacks, returnValue
+from zope.interface import implementer
+
+from adapters.common.utils.consulhelpers import get_endpoint_from_consul
+from adapters.kafka.event_bus_publisher import EventBusPublisher
+from adapters.common.utils.registry import IComponent
+import time
+
+log = get_logger()
+
+
+@implementer(IComponent)
+class KafkaProxy(object):
+    """
+    This is a singleton proxy kafka class to hide the kafka client details.
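+
+    Illustrative usage (a sketch; assumes start() was invoked elsewhere and
+    the topic name is hypothetical):
+        kafka_proxy = get_kafka_proxy()
+        if kafka_proxy and not kafka_proxy.is_faulty():
+            kafka_proxy.send_message('some.topic', msg.SerializeToString())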
+    """
+    _kafka_instance = None
+
+    def __init__(self,
+                 consul_endpoint='localhost:8500',
+                 kafka_endpoint='localhost:9092',
+                 ack_timeout=1000,
+                 max_req_attempts=10,
+                 config=None):
+
+        # Raise an exception if a singleton instance already exists
+        if KafkaProxy._kafka_instance:
+            raise Exception('Singleton exists for: {}'.format(KafkaProxy))
+
+        log.debug('initializing', endpoint=kafka_endpoint)
+        self.ack_timeout = ack_timeout
+        self.max_req_attempts = max_req_attempts
+        self.consul_endpoint = consul_endpoint
+        self.kafka_endpoint = kafka_endpoint
+        self.config = config or {}
+        self.kclient = None
+        self.kproducer = None
+        self.event_bus_publisher = None
+        self.stopping = False
+        self.faulty = False
+        log.debug('initialized', endpoint=kafka_endpoint)
+
+    @inlineCallbacks
+    def start(self):
+        log.debug('starting')
+        self._get_kafka_producer()
+        KafkaProxy._kafka_instance = self
+        self.event_bus_publisher = yield EventBusPublisher(
+            self, self.config.get('event_bus_publisher', {})).start()
+        log.info('started')
+        self.faulty = False
+        self.stopping = False
+        returnValue(self)
+
+    @inlineCallbacks
+    def stop(self):
+        try:
+            log.debug('stopping-kafka-proxy')
+            try:
+                if self.kclient:
+                    yield self.kclient.close()
+                    self.kclient = None
+                    log.debug('stopped-kclient-kafka-proxy')
+            except Exception as e:
+                log.exception('failed-stopped-kclient-kafka-proxy', e=e)
+
+            try:
+                if self.kproducer:
+                    yield self.kproducer.stop()
+                    self.kproducer = None
+                    log.debug('stopped-kproducer-kafka-proxy')
+            except Exception as e:
+                log.exception('failed-stopped-kproducer-kafka-proxy', e=e)
+
+            #try:
+            #    if self.event_bus_publisher:
+            #        yield self.event_bus_publisher.stop()
+            #        self.event_bus_publisher = None
+            #        log.debug('stopped-event-bus-publisher-kafka-proxy')
+            #except Exception, e:
+            #    log.debug('failed-stopped-event-bus-publisher-kafka-proxy')
+            #    pass
+
+            log.debug('stopped-kafka-proxy')
+
+        except Exception as e:
+            self.kclient = None
+            self.kproducer = None
+            #self.event_bus_publisher = None
+            log.exception('failed-stopped-kafka-proxy', e=e)
+
+    def _get_kafka_producer(self):
+        # PRODUCER_ACK_LOCAL_WRITE: the broker waits until the data is
+        # written to its local log before sending a response.  The producer
+        # below currently uses PRODUCER_ACK_NOT_REQUIRED (fire-and-forget).
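+        # Endpoint convention used here: a kafka_endpoint of the form
+        # 'host:port' is used directly, while a value starting with '@'
+        # (e.g. '@kafka', an illustrative service name) is treated as a
+        # consul service name and resolved via get_endpoint_from_consul.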
+        try:
+
+            if self.kafka_endpoint.startswith('@'):
+                try:
+                    _k_endpoint = get_endpoint_from_consul(self.consul_endpoint,
+                                                           self.kafka_endpoint[1:])
+                    log.debug('found-kafka-service', endpoint=_k_endpoint)
+
+                except Exception as e:
+                    log.exception('no-kafka-service-in-consul', e=e)
+
+                    self.kproducer = None
+                    self.kclient = None
+                    return
+            else:
+                _k_endpoint = self.kafka_endpoint
+
+            self.kclient = _KafkaClient(_k_endpoint)
+            self.kproducer = _kafkaProducer(self.kclient,
+                                            req_acks=PRODUCER_ACK_NOT_REQUIRED,
+                                            # req_acks=PRODUCER_ACK_LOCAL_WRITE,
+                                            # ack_timeout=self.ack_timeout,
+                                            # max_req_attempts=self.max_req_attempts)
+                                            )
+        except Exception as e:
+            log.exception('failed-get-kafka-producer', e=e)
+            return
+
+    @inlineCallbacks
+    def send_message(self, topic, msg):
+        assert topic is not None
+        assert msg is not None
+
+        # first check whether we have a kafka producer.  If there is none
+        # then try to get one - this happens only when we try to lookup the
+        # kafka service from consul
+        try:
+            if self.faulty is False:
+
+                if self.kproducer is None:
+                    self._get_kafka_producer()
+                    # Let the next message request do the retry if this is
+                    # still failing
+                    if self.kproducer is None:
+                        log.error('no-kafka-producer', endpoint=self.kafka_endpoint)
+                        return
+
+                msgs = [msg]
+
+                if self.kproducer and self.kclient and \
+                        self.event_bus_publisher and self.faulty is False:
+                    yield self.kproducer.send_messages(topic, msgs=msgs)
+                else:
+                    return
+
+        except Exception as e:
+            self.faulty = True
+            log.error('failed-to-send-kafka-msg', topic=topic, msg=msg, e=e)
+
+            # Set the kafka producer to None.  This is needed if the
+            # kafka docker went down and comes back up with a different
+            # port number.
+            if self.stopping is False:
+                log.debug('stopping-kafka-proxy')
+                try:
+                    self.stopping = True
+                    yield self.stop()
+                    self.stopping = False
+                    self.faulty = False
+                    log.debug('stopped-kafka-proxy')
+                except Exception as e:
+                    log.exception('failed-stopping-kafka-proxy', e=e)
+            else:
+                log.info('already-stopping-kafka-proxy')
+
+            return
+
+    def is_faulty(self):
+        return self.faulty
+
+
+# Common method to get the singleton instance of the kafka proxy class
+def get_kafka_proxy():
+    return KafkaProxy._kafka_instance
+
diff --git a/adapters/ponsim_olt/VERSION b/adapters/ponsim_olt/VERSION
new file mode 100644
index 0000000..c0ab82c
--- /dev/null
+++ b/adapters/ponsim_olt/VERSION
@@ -0,0 +1 @@
+0.0.1-dev
diff --git a/adapters/ponsim_olt/__init__.py b/adapters/ponsim_olt/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/ponsim_olt/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/ponsim_olt/main.py b/adapters/ponsim_olt/main.py
new file mode 100755
index 0000000..53745ee
--- /dev/null
+++ b/adapters/ponsim_olt/main.py
@@ -0,0 +1,487 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Ponsim OLT Adapter main entry point"""
+
+import argparse
+import arrow
+import os
+import time
+
+import yaml
+from simplejson import dumps
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.task import LoopingCall
+from zope.interface import implementer
+from adapters.protos import third_party
+from adapters.common.structlog_setup import setup_logging, update_logging
+from adapters.common.utils.dockerhelpers import get_my_containers_name
+from adapters.common.utils.nethelpers import get_my_primary_local_ipv4, \
+    get_my_primary_interface
+from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from adapters.common.utils.registry import registry, IComponent
+from packaging.version import Version
+from adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, get_messaging_proxy
+from adapters.ponsim_olt.ponsim_olt import PonSimOltAdapter
+from adapters.protos.adapter_pb2 import AdapterConfig, Adapter
+from adapters.kafka.adapter_request_facade import AdapterRequestFacade
+from adapters.kafka.core_proxy import CoreProxy
+from adapters.common.utils.deferred_utils import TimeOutError
+from adapters.common.utils.asleep import asleep
+
+_ = third_party
+
+defs = dict(
+    version_file='./VERSION',
+    config=os.environ.get('CONFIG', './ponsim_olt.yml'),
+    container_name_regex=os.environ.get('CONTAINER_NUMBER_EXTRACTOR',
+                                        '^.*\.([0-9]+)\..*$'),
+    consul=os.environ.get('CONSUL', 'localhost:8500'),
+    name=os.environ.get('NAME', 'ponsim_olt'),
+    vendor=os.environ.get('VENDOR', 'Voltha Project'),
+    device_type=os.environ.get('DEVICE_TYPE', 'ponsim_olt'),
+    accept_bulk_flow=os.environ.get('ACCEPT_BULK_FLOW', True),
+    accept_atomic_flow=os.environ.get('ACCEPT_ATOMIC_FLOW', True),
+    etcd=os.environ.get('ETCD', 'localhost:2379'),
+    core_topic=os.environ.get('CORE_TOPIC', 'rwcore'),
+    interface=os.environ.get('INTERFACE', get_my_primary_interface()),
+    instance_id=os.environ.get('INSTANCE_ID', os.environ.get('HOSTNAME', '1')),
+    kafka_adapter=os.environ.get('KAFKA_ADAPTER', '192.168.0.20:9092'),
+    kafka_cluster=os.environ.get('KAFKA_CLUSTER', '10.100.198.220:9092'),
+    backend=os.environ.get('BACKEND', 'none'),
+    retry_interval=os.environ.get('RETRY_INTERVAL', 2),
+    heartbeat_topic=os.environ.get('HEARTBEAT_TOPIC', "adapters.heartbeat"),
+)
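+
+# Each default above can be overridden from the environment, e.g. (an
+# illustrative invocation):
+#     CORE_TOPIC=rwcore KAFKA_ADAPTER=kafka:9092 python main.py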
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    _help = ('Path to ponsim_olt.yml config file (default: %s). '
+             'If relative, it is relative to main.py of the ponsim adapter.'
+             % defs['config'])
+    parser.add_argument('-c', '--config',
+                        dest='config',
+                        action='store',
+                        default=defs['config'],
+                        help=_help)
+
+    _help = 'Regular expression for extracting container number from ' \
+            'container name (default: %s)' % defs['container_name_regex']
+    parser.add_argument('-X', '--container-number-extractor',
+                        dest='container_name_regex',
+                        action='store',
+                        default=defs['container_name_regex'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to consul agent (default: %s)' % defs['consul']
+    parser.add_argument('-C', '--consul',
+                        dest='consul',
+                        action='store',
+                        default=defs['consul'],
+                        help=_help)
+
+    _help = 'name of this adapter (default: %s)' % defs['name']
+    parser.add_argument('-na', '--name',
+                        dest='name',
+                        action='store',
+                        default=defs['name'],
+                        help=_help)
+
+    _help = 'vendor of this adapter (default: %s)' % defs['vendor']
+    parser.add_argument('-ven', '--vendor',
+                        dest='vendor',
+                        action='store',
+                        default=defs['vendor'],
+                        help=_help)
+
+    _help = 'supported device type of this adapter (default: %s)' % defs[
+        'device_type']
+    parser.add_argument('-dt', '--device_type',
+                        dest='device_type',
+                        action='store',
+                        default=defs['device_type'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts bulk flow updates ' \
+            '(default: %s)' % defs['accept_bulk_flow']
+    parser.add_argument('-abf', '--accept_bulk_flow',
+                        dest='accept_bulk_flow',
+                        action='store',
+                        default=defs['accept_bulk_flow'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts add/remove flow ' \
+            'updates (default: %s)' % defs['accept_atomic_flow']
+    parser.add_argument('-aaf', '--accept_atomic_flow',
+                        dest='accept_atomic_flow',
+                        action='store',
+                        default=defs['accept_atomic_flow'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to etcd server (default: %s)' % defs['etcd']
+    parser.add_argument('-e', '--etcd',
+                        dest='etcd',
+                        action='store',
+                        default=defs['etcd'],
+                        help=_help)
+
+    _help = ('unique string id of this container instance (default: %s)'
+             % defs['instance_id'])
+    parser.add_argument('-i', '--instance-id',
+                        dest='instance_id',
+                        action='store',
+                        default=defs['instance_id'],
+                        help=_help)
+
+    _help = 'ETH interface to receive on (default: %s)' % defs['interface']
+    parser.add_argument('-I', '--interface',
+                        dest='interface',
+                        action='store',
+                        default=defs['interface'],
+                        help=_help)
+
+    _help = 'omit startup banner log lines'
+    parser.add_argument('-n', '--no-banner',
+                        dest='no_banner',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = 'do not emit periodic heartbeat log messages'
+    parser.add_argument('-N', '--no-heartbeat',
+                        dest='no_heartbeat',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = "suppress debug and info logs"
+    parser.add_argument('-q', '--quiet',
+                        dest='quiet',
+                        action='count',
+                        help=_help)
+
+    _help = 'enable verbose logging'
+    parser.add_argument('-v', '--verbose',
+                        dest='verbose',
+                        action='count',
+                        help=_help)
+
+    _help = ('use docker container name as container instance id'
+             ' (overrides -i/--instance-id option)')
+    parser.add_argument('--instance-id-is-container-name',
+                        dest='instance_id_is_container_name',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka adapter broker (default: %s). '
+             'If not specified, the address from the config file is used'
+             % defs['kafka_adapter'])
+    parser.add_argument('-KA', '--kafka_adapter',
+                        dest='kafka_adapter',
+                        action='store',
+                        default=defs['kafka_adapter'],
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka cluster broker (default: %s). '
+             'If not specified, the address from the config file is used'
+             % defs['kafka_cluster'])
+    parser.add_argument('-KC', '--kafka_cluster',
+                        dest='kafka_cluster',
+                        action='store',
+                        default=defs['kafka_cluster'],
+                        help=_help)
+
+    _help = 'backend to use for config persistence'
+    parser.add_argument('-b', '--backend',
+                        default=defs['backend'],
+                        choices=['none', 'consul', 'etcd'],
+                        help=_help)
+
+    _help = 'topic of core on the kafka bus'
+    parser.add_argument('-ct', '--core_topic',
+                        dest='core_topic',
+                        action='store',
+                        default=defs['core_topic'],
+                        help=_help)
+
+    args = parser.parse_args()
+
+    # post-processing
+
+    if args.instance_id_is_container_name:
+        args.instance_id = get_my_containers_name()
+
+    return args
+
+
+def load_config(args):
+    path = args.config
+    if path.startswith('.'):
+        dir_name = os.path.dirname(os.path.abspath(__file__))
+        path = os.path.join(dir_name, path)
+    path = os.path.abspath(path)
+    with open(path) as fd:
+        config = yaml.safe_load(fd)
+    return config
+
+
+def print_banner(log):
+    log.info(' ____                 _              ___  _   _____   ')
+    log.info('|  _ \ ___  _ __  ___(_)_ __ ___    / _ \| | |_   _|  ')
+    log.info('| |_) / _ \| \'_ \/ __| | \'_ ` _ \  | | | | |   | |  ')
+    log.info('|  __/ (_) | | | \__ \ | | | | | | | |_| | |___| |    ')
+    log.info('|_|   \___/|_| |_|___/_|_| |_| |_|  \___/|_____|_|    ')
+    log.info('                                                      ')
+    log.info('   _       _             _                            ')
+    log.info('  / \   __| | __ _ _ __ | |_ ___ _ __                 ')
+    log.info('  / _ \ / _` |/ _` | \'_ \| __/ _ \ \'__|             ')
+    log.info(' / ___ \ (_| | (_| | |_) | ||  __/ |                  ')
+    log.info('/_/   \_\__,_|\__,_| .__/ \__\___|_|                  ')
+    log.info('                   |_|                                ')
+    log.info('(to stop: press Ctrl-C)')
+
+
+@implementer(IComponent)
+class Main(object):
+
+    def __init__(self):
+
+        self.args = args = parse_args()
+        self.config = load_config(args)
+
+        verbosity_adjust = (args.verbose or 0) - (args.quiet or 0)
+        self.log = setup_logging(self.config.get('logging', {}),
+                                 args.instance_id,
+                                 verbosity_adjust=verbosity_adjust)
+        self.log.info('container-number-extractor',
+                      regex=args.container_name_regex)
+
+        self.ponsim_olt_adapter_version = self.get_version()
+        self.log.info('Ponsim-OLT-Adapter-Version',
+                      version=self.ponsim_olt_adapter_version)
+
+        if not args.no_banner:
+            print_banner(self.log)
+
+        # Create a unique instance id using the passed-in instance id and
+        # UTC timestamp
+        current_time = arrow.utcnow().timestamp
+        self.instance_id = self.args.instance_id + '_' + str(current_time)
+
+        self.core_topic = args.core_topic
+        self.listening_topic = args.name
+        self.startup_components()
+
+        if not args.no_heartbeat:
+            self.start_heartbeat()
+            self.start_kafka_cluster_heartbeat(self.instance_id)
+
+    def get_version(self):
+        path = defs['version_file']
+        if not path.startswith('/'):
+            dir_name = os.path.dirname(os.path.abspath(__file__))
+            path = os.path.join(dir_name, path)
+
+        path = os.path.abspath(path)
+        with open(path, 'r') as version_file:
+            v = version_file.read().strip()
+
+        # Use Version to validate the version string - an exception is
+        # raised if the version is invalid
+        Version(v)
+
+        return v
+
+    def start(self):
+        self.start_reactor()  # will not return except Keyboard interrupt
+
+    def stop(self):
+        pass
+
+    def get_args(self):
+        """Allow access to command line args"""
+        return self.args
+
+    def get_config(self):
+        """Allow access to content of config file"""
+        return self.config
+
+    def _get_adapter_config(self):
+        cfg = AdapterConfig()
+        return cfg
+
+    @inlineCallbacks
+    def startup_components(self):
+        try:
+            self.log.info('starting-internal-components',
+                          consul=self.args.consul,
+                          etcd=self.args.etcd)
+
+            registry.register('main', self)
+
+            # Update the logger to output the vcore id.
+            self.log = update_logging(instance_id=self.instance_id,
+                                      vcore_id=None)
+
+            yield registry.register(
+                'kafka_cluster_proxy',
+                KafkaProxy(
+                    self.args.consul,
+                    self.args.kafka_cluster,
+                    config=self.config.get('kafka-cluster-proxy', {})
+                )
+            ).start()
+
+            config = self._get_adapter_config()
+
+            self.core_proxy = CoreProxy(
+                kafka_proxy=None,
+                core_topic=self.core_topic,
+                my_listening_topic=self.listening_topic)
+
+            ponsim_olt_adapter = PonSimOltAdapter(
+                adapter_agent=self.core_proxy, config=config)
+            ponsim_request_handler = AdapterRequestFacade(
+                adapter=ponsim_olt_adapter)
+
+            yield registry.register(
+                'kafka_adapter_proxy',
+                IKafkaMessagingProxy(
+                    kafka_host_port=self.args.kafka_adapter,
+                    # TODO: Add KV Store object reference
+                    kv_store=self.args.backend,
+                    default_topic=self.args.name,
+                    # Needs to assign a real class
+                    target_cls=ponsim_request_handler
+                )
+            ).start()
+
+            self.core_proxy.kafka_proxy = get_messaging_proxy()
+
+            # Retry forever
+            yield self._register_with_core(-1)
+
+            self.log.info('started-internal-services')
+
+        except Exception as e:
+            self.log.exception('Failure-to-start-all-components', e=e)
+
+    @inlineCallbacks
+    def shutdown_components(self):
+        """Execute before the reactor is shut down"""
+        self.log.info('exiting-on-keyboard-interrupt')
+        for component in reversed(registry.iterate()):
+            yield component.stop()
+
+        import threading
+        self.log.info('THREADS:')
+        main_thread = threading.current_thread()
+        for t in threading.enumerate():
+            if t is main_thread:
+                continue
+            if not t.isDaemon():
+                continue
+            self.log.info('joining thread {} {}'.format(
+                t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
+            t.join()
+
+    def start_reactor(self):
+        from twisted.internet import reactor
+        reactor.callWhenRunning(
+            lambda: self.log.info('twisted-reactor-started'))
+        reactor.addSystemEventTrigger('before', 'shutdown',
+                                      self.shutdown_components)
+        reactor.run()
+
+    @inlineCallbacks
+    def _register_with_core(self, retries):
+        # Send registration to Core with adapter specs
+        adapter = Adapter()
+        adapter.id = self.args.name
+        adapter.vendor = self.args.vendor
+        adapter.version = self.ponsim_olt_adapter_version
+        while True:
+            try:
+                resp = yield self.core_proxy.register(adapter)
+                self.log.info('registration-response', response=resp)
+                returnValue(resp)
+            except TimeOutError as e:
+                self.log.warn("timeout-when-registering-with-core", e=e)
+                if retries == 0:
+                    self.log.exception("no-more-retries", e=e)
+                    raise
+                else:
+                    retries = retries if retries < 0 else retries - 1
+                    yield asleep(defs['retry_interval'])
+            except Exception as e:
+                self.log.exception("failed-registration", e=e)
+                raise
+
+    def start_heartbeat(self):
+
+        t0 = time.time()
+        t0s = time.ctime(t0)
+
+        def heartbeat():
+            self.log.debug(status='up', since=t0s, uptime=time.time() - t0)
+
+        lc = LoopingCall(heartbeat)
+        lc.start(10)
+
+    # Temporary function to send a heartbeat message to the external kafka
+    # broker
+    def start_kafka_cluster_heartbeat(self, instance_id):
+        # For heartbeat we send a JSON-encoded dict to a dedicated
+        # heartbeat topic (defs['heartbeat_topic'])
+        message = dict(
+            type='heartbeat',
+            adapter=self.args.name,
+            instance=instance_id,
+            ip=get_my_primary_local_ipv4()
+        )
+        topic = defs['heartbeat_topic']
+
+        def send_msg(start_time):
+            try:
+                kafka_cluster_proxy = get_kafka_proxy()
+                if kafka_cluster_proxy and not kafka_cluster_proxy.is_faulty():
+                    # self.log.debug('kafka-proxy-available')
+                    message['ts'] = arrow.utcnow().timestamp
+                    message['uptime'] = time.time() - start_time
+                    # self.log.debug('start-kafka-heartbeat')
+                    kafka_cluster_proxy.send_message(topic, dumps(message))
+                else:
+                    self.log.error('kafka-proxy-unavailable')
+            except Exception as e:
+                self.log.exception('failed-sending-message-heartbeat', e=e)
+
+        try:
+            t0 = time.time()
+            lc = LoopingCall(send_msg, t0)
+            lc.start(10)
+        except Exception as e:
+            self.log.exception('failed-kafka-heartbeat', e=e)
+
+
+if __name__ == '__main__':
+    Main().start()
diff --git a/adapters/ponsim_olt/ponsim_olt.py b/adapters/ponsim_olt/ponsim_olt.py
new file mode 100644
index 0000000..5e096b4
--- /dev/null
+++ b/adapters/ponsim_olt/ponsim_olt.py
@@ -0,0 +1,755 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Fully simulated OLT adapter.
+"""
+from uuid import uuid4
+
+import arrow
+import adapters.common.openflow.utils as fd
+import grpc
+import structlog
+from scapy.layers.l2 import Ether, Dot1Q
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue
+from grpc._channel import _Rendezvous
+
+from adapters.common.frameio.frameio import BpfProgramFilter, hexify
+from adapters.common.utils.asleep import asleep
+from twisted.internet.task import LoopingCall
+from adapters.iadapter import OltAdapter
+from adapters.protos import third_party
+from adapters.protos import openflow_13_pb2 as ofp
+from adapters.protos import ponsim_pb2
+from adapters.protos.common_pb2 import OperStatus, ConnectStatus, AdminState
+from adapters.protos.device_pb2 import Port, PmConfig, PmConfigs
+from adapters.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
+from google.protobuf.empty_pb2 import Empty
+from adapters.protos.logical_device_pb2 import LogicalPort, LogicalDevice
+from adapters.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+    OFPPF_1GB_FD, \
+    OFPC_GROUP_STATS, OFPC_PORT_STATS, OFPC_TABLE_STATS, OFPC_FLOW_STATS, \
+    ofp_switch_features, ofp_desc
+from adapters.protos.openflow_13_pb2 import ofp_port
+from adapters.protos.ponsim_pb2 import FlowTable, PonSimFrame
+from adapters.protos.core_adapter_pb2 import SwitchCapability, PortCapability
+from adapters.common.utils.registry import registry
+
+_ = third_party
+log = structlog.get_logger()
+
+PACKET_IN_VLAN = 4000
+is_inband_frame = BpfProgramFilter('(ether[14:2] & 0xfff) = 0x{:03x}'.format(
+    PACKET_IN_VLAN))
+
+def mac_str_to_tuple(mac):
+    return tuple(int(d, 16) for d in mac.split(':'))
+
+class AdapterPmMetrics:
+    def __init__(self, device):
+        self.pm_names = {'tx_64_pkts', 'tx_65_127_pkts', 'tx_128_255_pkts',
+                         'tx_256_511_pkts', 'tx_512_1023_pkts',
+                         'tx_1024_1518_pkts', 'tx_1519_9k_pkts',
+                         'rx_64_pkts', 'rx_65_127_pkts',
+                         'rx_128_255_pkts', 'rx_256_511_pkts',
+                         'rx_512_1023_pkts', 'rx_1024_1518_pkts',
+                         'rx_1519_9k_pkts'}
+        self.device = device
+        self.id = device.id
+        self.name = 'ponsim_olt'
+        self.default_freq = 150
+        self.grouped = False
+        self.freq_override = False
+        self.pon_metrics_config = dict()
+        self.nni_metrics_config = dict()
+        self.lc = None
+        for m in self.pm_names:
+            self.pon_metrics_config[m] = PmConfig(name=m,
+                                                  type=PmConfig.COUNTER,
+                                                  enabled=True)
+            self.nni_metrics_config[m] = PmConfig(name=m,
+                                                  type=PmConfig.COUNTER,
+                                                  enabled=True)
+
+    def update(self, pm_config):
+        if self.default_freq != pm_config.default_freq:
+            # Update the collection callback to the new frequency
+            self.default_freq = pm_config.default_freq
+            if self.lc is not None:
+                self.lc.stop()
+                self.lc.start(interval=self.default_freq / 10)
+        for m in pm_config.metrics:
+            self.pon_metrics_config[m.name].enabled = m.enabled
+            self.nni_metrics_config[m.name].enabled = m.enabled
+
+    def make_proto(self):
+        pm_config = PmConfigs(
+            id=self.id,
+            default_freq=self.default_freq,
+            grouped=False,
+            freq_override=False)
+        for m in sorted(self.pon_metrics_config):
+            pm = self.pon_metrics_config[m]  # either will do; they're the same
+            pm_config.metrics.extend([PmConfig(name=pm.name,
+                                               type=pm.type,
+                                               enabled=pm.enabled)])
+        return pm_config
+
+    def collect_port_metrics(self, channel):
+        rtrn_port_metrics = dict()
+        stub = ponsim_pb2.PonSimStub(channel)
+        stats = stub.GetStats(Empty())
+        rtrn_port_metrics['pon'] = self.extract_pon_metrics(stats)
+        rtrn_port_metrics['nni'] = self.extract_nni_metrics(stats)
+        return rtrn_port_metrics
+
+    def extract_pon_metrics(self, stats):
+        rtrn_pon_metrics = dict()
+        for m in stats.metrics:
+            if m.port_name == "pon":
+                for p in m.packets:
+                    if self.pon_metrics_config[p.name].enabled:
+                        rtrn_pon_metrics[p.name] = p.value
+                return rtrn_pon_metrics
+
+    def extract_nni_metrics(self, stats):
+        rtrn_nni_metrics = dict()
+        for m in stats.metrics:
+            if m.port_name == "nni":
+                for p in m.packets:
+                    if self.nni_metrics_config[p.name].enabled:
+                        rtrn_nni_metrics[p.name] = p.value
+                return rtrn_nni_metrics
+
+    def start_collector(self, callback):
+        log.info("starting-pm-collection", device_name=self.name,
+                 device_id=self.device.id)
+        prefix = 'voltha.{}.{}'.format(self.name, self.device.id)
+        self.lc = LoopingCall(callback, self.device.id, prefix)
+        self.lc.start(interval=self.default_freq / 10)
+
+    def stop_collector(self):
+        log.info("stopping-pm-collection", device_name=self.name,
+                 device_id=self.device.id)
+        self.lc.stop()
+
+
+class AdapterAlarms:
+    def __init__(self, adapter, device):
+        self.adapter = adapter
+        self.device = device
+        self.lc = None
+
+    def send_alarm(self, context_data, alarm_data):
+        try:
+            current_context = {}
+            for key, value in context_data.__dict__.items():
+                current_context[key] = str(value)
+
+            alarm_event = self.adapter.adapter_agent.create_alarm(
+                resource_id=self.device.id,
+                description="{}.{} - {}".format(
+                    self.adapter.name, self.device.id,
+                    alarm_data['description'])
+                if 'description' in alarm_data else None,
+                type=alarm_data.get('type'),
+                category=alarm_data.get('category'),
+                severity=alarm_data.get('severity'),
+                state=alarm_data.get('state'),
+                raised_ts=alarm_data.get('ts', 0),
+                context=current_context
+            )
+
+            self.adapter.adapter_agent.submit_alarm(self.device.id,
+                                                    alarm_event)
+
+        except Exception as e:
+            log.exception('failed-to-send-alarm', e=e)
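+
+    # Illustrative alarm_data shape expected by send_alarm (all keys are
+    # optional; the values shown are hypothetical):
+    #     {'description': 'olt-los', 'type': 'equipment',
+    #      'category': 'olt', 'severity': 'major', 'state': 'raised',
+    #      'ts': arrow.utcnow().timestamp}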
+
+
+class PonSimOltAdapter(OltAdapter):
+    def __init__(self, adapter_agent, config):
+        super(PonSimOltAdapter, self).__init__(adapter_agent=adapter_agent,
+                                               config=config,
+                                               device_handler_class=PonSimOltHandler,
+                                               name='ponsim_olt',
+                                               vendor='Voltha project',
+                                               version='0.4',
+                                               device_type='ponsim_olt',
+                                               accepts_bulk_flow_update=True,
+                                               accepts_add_remove_flow_updates=False)
+
+    def update_pm_config(self, device, pm_config):
+        log.info("adapter-update-pm-config", device=device,
+                 pm_config=pm_config)
+        handler = self.devices_handlers[device.id]
+        handler.update_pm_config(device, pm_config)
+
+
+
+class PonSimOltHandler(object):
+    def __init__(self, adapter, device_id):
+        self.adapter = adapter
+        self.adapter_agent = adapter.adapter_agent
+        self.device_id = device_id
+        self.log = structlog.get_logger(device_id=device_id)
+        self.channel = None
+        self.io_port = None
+        self.logical_device_id = None
+        self.nni_port = None
+        self.ofp_port_no = None
+        self.interface = registry('main').get_args().interface
+        self.pm_metrics = None
+        self.alarms = None
+        self.frames = None
+
+    @inlineCallbacks
+    def get_channel(self):
+        if self.channel is None:
+            try:
+                device = yield self.adapter_agent.get_device(self.device_id)
+                self.log.info('device-info', device=device, host_port=device.host_and_port)
+                self.channel = grpc.insecure_channel(device.host_and_port)
+            except Exception as e:
+                log.exception("ponsim-connection-failure", e=e)
+
+        returnValue(self.channel)
+
+    def close_channel(self):
+        if self.channel is None:
+            self.log.info('grpc-channel-already-closed')
+            return
+        else:
+            if self.frames is not None:
+                self.frames.cancel()
+                self.frames = None
+                self.log.info('cancelled-grpc-frame-stream')
+
+            self.channel.unsubscribe(lambda *args: None)
+            self.channel = None
+
+            self.log.info('grpc-channel-closed')
+
+    def _get_nni_port(self):
+        ports = self.adapter_agent.get_ports(self.device_id, Port.ETHERNET_NNI)
+        if ports:
+            # For now, we use only one NNI port
+            return ports[0]
+
+    @inlineCallbacks
+    def activate(self, device):
+        try:
+            self.log.info('activating')
+
+            if not device.host_and_port:
+                device.oper_status = OperStatus.FAILED
+                device.reason = 'No host_and_port field provided'
+                self.adapter_agent.device_update(device)
+                return
+
+            yield self.get_channel()
+            stub = ponsim_pb2.PonSimStub(self.channel)
+            info = stub.GetDeviceInfo(Empty())
+            log.info('got-info', info=info, device_id=device.id)
+            self.ofp_port_no = info.nni_port
+
+            device.root = True
+            device.vendor = 'ponsim'
+            device.model = 'n/a'
+            device.serial_number = device.host_and_port
+            device.connect_status = ConnectStatus.REACHABLE
+            yield self.adapter_agent.device_update(device)
+
+            # Now set the initial PM configuration for this device
+            self.pm_metrics = AdapterPmMetrics(device)
+            pm_config = self.pm_metrics.make_proto()
+            log.info("initial-pm-config", pm_config=pm_config)
+            self.adapter_agent.device_pm_config_update(pm_config, init=True)
+
+            # Setup alarm handler
+            self.alarms = AdapterAlarms(self.adapter, device)
+
+            nni_port = Port(
+                port_no=info.nni_port,
+                label='NNI facing Ethernet port',
+                type=Port.ETHERNET_NNI,
+                admin_state=AdminState.ENABLED,
+                oper_status=OperStatus.ACTIVE
+            )
+            self.nni_port = nni_port
+            yield self.adapter_agent.port_created(device.id, nni_port)
+            yield self.adapter_agent.port_created(device.id, Port(
+                port_no=1,
+                label='PON port',
+                type=Port.PON_OLT,
+                admin_state=AdminState.ENABLED,
+                oper_status=OperStatus.ACTIVE
+            ))
+            yield self.adapter_agent.device_state_update(device.id, oper_status=OperStatus.ACTIVE)
+
+            # Register the ONUs
+            self.log.info('onu-found', onus=info.onus, len=len(info.onus))
+            for onu in info.onus:
+                vlan_id = onu.uni_port
+                yield self.adapter_agent.child_device_detected(
+                    parent_device_id=device.id,
+                    parent_port_no=1,
+                    child_device_type='ponsim_onu',
+                    channel_id=vlan_id,
+                )
+
+            self.log.info('starting-frame-grpc-stream')
+            reactor.callInThread(self.rcv_grpc)
+            self.log.info('started-frame-grpc-stream')
+
+            # TODO
+            # Start collecting stats from the device after a brief pause
+            # self.start_kpi_collection(device.id)
+        except Exception as e:
+            log.exception("Exception-activating", e=e)
+
+
+    def get_ofp_device_info(self, device):
+        return SwitchCapability(
+            desc=ofp_desc(
+                hw_desc='ponsim pon',
+                sw_desc='ponsim pon',
+                serial_num=device.serial_number,
+                dp_desc='n/a'
+            ),
+            switch_features=ofp_switch_features(
+                n_buffers=256,  # TODO fake for now
+                n_tables=2,  # TODO ditto
+                capabilities=(  # TODO and ditto
+                    OFPC_FLOW_STATS
+                    | OFPC_TABLE_STATS
+                    | OFPC_PORT_STATS
+                    | OFPC_GROUP_STATS
+                )
+            )
+        )
+
+    def get_ofp_port_info(self, device, port_no):
+        # Since the adapter created the device port, it already holds the
+        # port reference needed to return the capability.   TODO:  Do a
+        # lookup on the NNI port number and return the appropriate attributes
+        self.log.info('get_ofp_port_info', port_no=port_no, info=self.ofp_port_no, device_id=device.id)
+        cap = OFPPF_1GB_FD | OFPPF_FIBER
+        return PortCapability(
+            port=LogicalPort(
+                id='nni',
+                ofp_port=ofp_port(
+                    port_no=port_no,
+                    hw_addr=mac_str_to_tuple(
+                        '00:00:00:00:00:%02x' % self.ofp_port_no),
+                    name='nni',
+                    config=0,
+                    state=OFPPS_LIVE,
+                    curr=cap,
+                    advertised=cap,
+                    peer=cap,
+                    curr_speed=OFPPF_1GB_FD,
+                    max_speed=OFPPF_1GB_FD
+                )
+            )
+        )
+
+    def reconcile(self, device):
+        self.log.info('reconciling-OLT-device-starts')
+
+        if not device.host_and_port:
+            device.oper_status = OperStatus.FAILED
+            device.reason = 'No host_and_port field provided'
+            self.adapter_agent.device_update(device)
+            return
+
+        try:
+            stub = ponsim_pb2.PonSimStub(self.get_channel())
+            info = stub.GetDeviceInfo(Empty())
+            log.info('got-info', info=info)
+            # TODO: Verify we are connected to the same device we are
+            # reconciling - ponsim does not expose much data to
+            # differentiate devices at this time
+            device.oper_status = OperStatus.ACTIVE
+            self.adapter_agent.device_update(device)
+            self.ofp_port_no = info.nni_port
+            self.nni_port = self._get_nni_port()
+        except Exception as e:
+            log.exception('device-unreachable', e=e)
+            device.connect_status = ConnectStatus.UNREACHABLE
+            device.oper_status = OperStatus.UNKNOWN
+            self.adapter_agent.device_update(device)
+            return
+
+        # Now set the initial PM configuration for this device
+        self.pm_metrics = AdapterPmMetrics(device)
+        pm_config = self.pm_metrics.make_proto()
+        log.info("initial-pm-config", pm_config=pm_config)
+        self.adapter_agent.device_pm_config_update(pm_config, init=True)
+
+        # Setup alarm handler
+        self.alarms = AdapterAlarms(self.adapter, device)
+
+        # TODO: Is there anything required to verify nni and PON ports
+
+        # Set the logical device id
+        device = self.adapter_agent.get_device(device.id)
+        if device.parent_id:
+            self.logical_device_id = device.parent_id
+            self.adapter_agent.reconcile_logical_device(device.parent_id)
+        else:
+            self.log.info('no-logical-device-set')
+
+        # Reconcile child devices
+        self.adapter_agent.reconcile_child_devices(device.id)
+
+        reactor.callInThread(self.rcv_grpc)
+
+        # Start collecting stats from the device after a brief pause
+        self.start_kpi_collection(device.id)
+
+        self.log.info('reconciling-OLT-device-ends')
+
+    @inlineCallbacks
+    def rcv_grpc(self):
+        """
+        This call establishes a GRPC stream to receive frames.
+        """
+        yield self.get_channel()
+        stub = ponsim_pb2.PonSimStub(self.channel)
+        # stub = ponsim_pb2.PonSimStub(self.get_channel())
+
+        # Attempt to establish a grpc stream with the remote ponsim service
+        self.frames = stub.ReceiveFrames(Empty())
+
+        self.log.info('start-receiving-grpc-frames')
+
+        try:
+            for frame in self.frames:
+                self.log.info('received-grpc-frame',
+                              frame_len=len(frame.payload))
+                self._rcv_frame(frame.payload)
+
+        except _Rendezvous as e:
+            log.warn('grpc-connection-lost', message=e.message)
+
+        self.log.info('stopped-receiving-grpc-frames')
+
+
+    # VOLTHA's flow decomposition removes the information about which flows
+    # are trap flows, i.e. flows whose traffic should be forwarded to the
+    # controller.  We go through the flows and change the output port of the
+    # flows we know to be trap flows to the OF CONTROLLER port.
+    def update_flow_table(self, flows):
+        stub = ponsim_pb2.PonSimStub(self.get_channel())
+        self.log.info('pushing-olt-flow-table')
+        for flow in flows:
+            classifier_info = {}
+            for field in fd.get_ofb_fields(flow):
+                if field.type == fd.ETH_TYPE:
+                    classifier_info['eth_type'] = field.eth_type
+                    self.log.debug('field-type-eth-type',
+                                   eth_type=classifier_info['eth_type'])
+                elif field.type == fd.IP_PROTO:
+                    classifier_info['ip_proto'] = field.ip_proto
+                    self.log.debug('field-type-ip-proto',
+                                   ip_proto=classifier_info['ip_proto'])
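+            # Known trap flows: UDP (ip_proto 17, e.g. DHCP), IGMP
+            # (ip_proto 2) and EAPOL (eth_type 0x888e) are redirected to the
+            # controller port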
+            if ('ip_proto' in classifier_info and (
+                    classifier_info['ip_proto'] == 17 or
+                    classifier_info['ip_proto'] == 2)) or (
+                    'eth_type' in classifier_info and
+                    classifier_info['eth_type'] == 0x888e):
+                for action in fd.get_actions(flow):
+                    if action.type == ofp.OFPAT_OUTPUT:
+                        action.output.port = ofp.OFPP_CONTROLLER
+            self.log.info('out_port', out_port=fd.get_out_port(flow))
+
+        stub.UpdateFlowTable(FlowTable(
+            port=0,
+            flows=flows
+        ))
+        self.log.info('success')
+
+    def remove_from_flow_table(self, flows):
+        self.log.debug('remove-from-flow-table', flows=flows)
+        # TODO: Update PONSIM code to accept incremental flow changes
+        # Once completed, the accepts_add_remove_flow_updates for this
+        # device type can be set to True
+
+    def add_to_flow_table(self, flows):
+        self.log.debug('add-to-flow-table', flows=flows)
+        # TODO: Update PONSIM code to accept incremental flow changes
+        # Once completed, the accepts_add_remove_flow_updates for this
+        # device type can be set to True
+
+    def update_pm_config(self, device, pm_config):
+        log.info("handler-update-pm-config", device=device,
+                 pm_config=pm_config)
+        self.pm_metrics.update(pm_config)
+
+    def send_proxied_message(self, proxy_address, msg):
+        self.log.info('sending-proxied-message')
+        if isinstance(msg, FlowTable):
+            stub = ponsim_pb2.PonSimStub(self.get_channel())
+            self.log.info('pushing-onu-flow-table', port=msg.port)
+            res = stub.UpdateFlowTable(msg)
+            self.adapter_agent.receive_proxied_message(proxy_address, res)
+
+    def packet_out(self, egress_port, msg):
+        self.log.info('sending-packet-out', egress_port=egress_port,
+                      msg=hexify(msg))
+        pkt = Ether(msg)
+        out_pkt = pkt
+        if egress_port != self.nni_port.port_no:
+            # don't do the VLAN manipulation for the NNI port; VLANs are already correct
+            out_pkt = (
+                    Ether(src=pkt.src, dst=pkt.dst) /
+                    Dot1Q(vlan=egress_port, type=pkt.type) /
+                    pkt.payload
+            )
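+            # e.g. a frame egressing logical port 128 would leave as
+            # Ether / Dot1Q(vlan=128) / original payload (port number
+            # illustrative)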
+
+        # TODO need better way of mapping logical ports to PON ports
+        out_port = self.nni_port.port_no if egress_port == self.nni_port.port_no else 1
+
+        # send over grpc stream
+        stub = ponsim_pb2.PonSimStub(self.get_channel())
+        frame = PonSimFrame(id=self.device_id, payload=str(out_pkt), out_port=out_port)
+        stub.SendFrame(frame)
+
+
+    @inlineCallbacks
+    def reboot(self):
+        self.log.info('rebooting', device_id=self.device_id)
+
+        # Update the operational status to ACTIVATING and connect status to
+        # UNREACHABLE
+        device = self.adapter_agent.get_device(self.device_id)
+        previous_oper_status = device.oper_status
+        previous_conn_status = device.connect_status
+        device.oper_status = OperStatus.ACTIVATING
+        device.connect_status = ConnectStatus.UNREACHABLE
+        self.adapter_agent.device_update(device)
+
+        # Update the child devices connect state to UNREACHABLE
+        self.adapter_agent.update_child_devices_state(self.device_id,
+                                                      connect_status=ConnectStatus.UNREACHABLE)
+
+        # Sleep 10 secs, simulating a reboot
+        # TODO: send alert and clear alert after the reboot
+        yield asleep(10)
+
+        # Change the operational status back to its previous state.  With a
+        # real OLT the operational state should be the state the device is
+        # after a reboot.
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+        device.oper_status = previous_oper_status
+        device.connect_status = previous_conn_status
+        self.adapter_agent.device_update(device)
+
+        # Update the child devices connect state to REACHABLE
+        self.adapter_agent.update_child_devices_state(self.device_id,
+                                                      connect_status=ConnectStatus.REACHABLE)
+
+        self.log.info('rebooted', device_id=self.device_id)
+
+    def self_test_device(self, device):
+        """
+        This is called to Self a device based on a NBI call.
+        :param device: A Voltha.Device object.
+        :return: Will return result of self test
+        """
+        log.info('self-test-device', device=device.id)
+        raise NotImplementedError()
+
+    def disable(self):
+        self.log.info('disabling', device_id=self.device_id)
+
+        self.stop_kpi_collection()
+
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+
+        # Update the operational status to UNKNOWN
+        device.oper_status = OperStatus.UNKNOWN
+        device.connect_status = ConnectStatus.UNREACHABLE
+        self.adapter_agent.device_update(device)
+
+        # Remove the logical device
+        logical_device = self.adapter_agent.get_logical_device(
+            self.logical_device_id)
+        self.adapter_agent.delete_logical_device(logical_device)
+
+        # Disable all child devices first
+        self.adapter_agent.update_child_devices_state(self.device_id,
+                                                      admin_state=AdminState.DISABLED)
+
+        # Remove the peer references from this device
+        self.adapter_agent.delete_all_peer_references(self.device_id)
+
+        # Set all ports to disabled
+        self.adapter_agent.disable_all_ports(self.device_id)
+
+        self.close_channel()
+        self.log.info('disabled-grpc-channel')
+
+        # Update the logical device mapping
+        if self.logical_device_id in \
+                self.adapter.logical_device_id_to_root_device_id:
+            del self.adapter.logical_device_id_to_root_device_id[
+                self.logical_device_id]
+
+        # TODO:
+        # 1) Remove all flows from the device
+        # 2) Remove the device from ponsim
+
+        self.log.info('disabled', device_id=device.id)
+
+    def reenable(self):
+        self.log.info('re-enabling', device_id=self.device_id)
+
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+
+        # Set the ofp_port_no and nni_port in case we bypassed the reconcile
+        # process if the device was in DISABLED state on voltha restart
+        if not self.ofp_port_no and not self.nni_port:
+            stub = ponsim_pb2.PonSimStub(self.get_channel())
+            info = stub.GetDeviceInfo(Empty())
+            log.info('got-info', info=info)
+            self.ofp_port_no = info.nni_port
+            self.nni_port = self._get_nni_port()
+
+        # Update the connect status to REACHABLE
+        device.connect_status = ConnectStatus.REACHABLE
+        self.adapter_agent.device_update(device)
+
+        # Set all ports to enabled
+        self.adapter_agent.enable_all_ports(self.device_id)
+
+        ld = LogicalDevice(
+            # not setting id and datapath_id will let the adapter agent pick the id
+            desc=ofp_desc(
+                hw_desc='simulated pon',
+                sw_desc='simulated pon',
+                serial_num=uuid4().hex,
+                dp_desc='n/a'
+            ),
+            switch_features=ofp_switch_features(
+                n_buffers=256,  # TODO fake for now
+                n_tables=2,  # TODO ditto
+                capabilities=(  # TODO and ditto
+                        OFPC_FLOW_STATS
+                        | OFPC_TABLE_STATS
+                        | OFPC_PORT_STATS
+                        | OFPC_GROUP_STATS
+                )
+            ),
+            root_device_id=device.id
+        )
+        mac_address = "AA:BB:CC:DD:EE:FF"
+        ld_initialized = self.adapter_agent.create_logical_device(ld,
+                                                                  dpid=mac_address)
+        cap = OFPPF_1GB_FD | OFPPF_FIBER
+        self.adapter_agent.add_logical_port(ld_initialized.id, LogicalPort(
+            id='nni',
+            ofp_port=ofp_port(
+                port_no=self.ofp_port_no,
+                hw_addr=mac_str_to_tuple(
+                    '00:00:00:00:00:%02x' % self.ofp_port_no),
+                name='nni',
+                config=0,
+                state=OFPPS_LIVE,
+                curr=cap,
+                advertised=cap,
+                peer=cap,
+                curr_speed=OFPPF_1GB_FD,
+                max_speed=OFPPF_1GB_FD
+            ),
+            device_id=device.id,
+            device_port_no=self.nni_port.port_no,
+            root_port=True
+        ))
+
+        device = self.adapter_agent.get_device(device.id)
+        device.parent_id = ld_initialized.id
+        device.oper_status = OperStatus.ACTIVE
+        self.adapter_agent.device_update(device)
+        self.logical_device_id = ld_initialized.id
+
+        # Reenable all child devices
+        self.adapter_agent.update_child_devices_state(device.id,
+                                                      admin_state=AdminState.ENABLED)
+
+        # establish frame grpc-stream
+        reactor.callInThread(self.rcv_grpc)
+
+        self.start_kpi_collection(device.id)
+
+        self.log.info('re-enabled', device_id=device.id)
+
+    def delete(self):
+        self.log.info('deleting', device_id=self.device_id)
+
+        # Remove all child devices
+        self.adapter_agent.delete_all_child_devices(self.device_id)
+
+        self.close_channel()
+        self.log.info('disabled-grpc-channel')
+
+        # TODO:
+        # 1) Remove all flows from the device
+        # 2) Remove the device from ponsim
+
+        self.log.info('deleted', device_id=self.device_id)
+
+    def start_kpi_collection(self, device_id):
+
+        def _collect(device_id, prefix):
+
+            try:
+                # Step 1: gather metrics from device
+                port_metrics = \
+                    self.pm_metrics.collect_port_metrics(self.get_channel())
+
+                # Step 2: prepare the KpiEvent for submission
+                # we time-stamp them here (or could use a time derived from the OLT)
+                ts = arrow.utcnow().timestamp
+                kpi_event = KpiEvent(
+                    type=KpiEventType.slice,
+                    ts=ts,
+                    prefixes={
+                        # OLT NNI port
+                        prefix + '.nni': MetricValuePairs(
+                            metrics=port_metrics['nni']),
+                        # OLT PON port
+                        prefix + '.pon': MetricValuePairs(
+                            metrics=port_metrics['pon'])
+                    }
+                )
+
+                # Step 3: submit
+                self.adapter_agent.submit_kpis(kpi_event)
+
+            except Exception as e:
+                log.exception('failed-to-submit-kpis', e=e)
+
+        self.pm_metrics.start_collector(_collect)
+
+    def stop_kpi_collection(self):
+        self.pm_metrics.stop_collector()
diff --git a/adapters/ponsim_olt/ponsim_olt.yml b/adapters/ponsim_olt/ponsim_olt.yml
new file mode 100644
index 0000000..fdb647a
--- /dev/null
+++ b/adapters/ponsim_olt/ponsim_olt.yml
@@ -0,0 +1,52 @@
+logging:
+    version: 1
+
+    formatters:
+      brief:
+        format: '%(message)s'
+      default:
+        format: '%(asctime)s.%(msecs)03d %(levelname)-8s %(threadName)s %(module)s.%(funcName)s %(message)s'
+        datefmt: '%Y%m%dT%H%M%S'
+
+    handlers:
+        console:
+            class : logging.StreamHandler
+            level: DEBUG
+            formatter: default
+            stream: ext://sys.stdout
+        localRotatingFile:
+            class: logging.handlers.RotatingFileHandler
+            filename: ponsim_olt.log
+            formatter: default
+            maxBytes: 2097152
+            backupCount: 10
+            level: DEBUG
+        null:
+            class: logging.NullHandler
+
+    loggers:
+        amqp:
+            handlers: [null]
+            propagate: False
+        conf:
+            propagate: False
+        '': # root logger
+            handlers: [console, localRotatingFile]
+            level: DEBUG # this can be bumped up/down by -q and -v command line
+                        # options
+            propagate: False
+
+
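+# Mapping of internal event bus topics to the external Kafka topics on
+# which they are published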
+kafka-cluster-proxy:
+    event_bus_publisher:
+        topic_mappings:
+            'model-change-events':
+                kafka_topic: 'voltha.events'
+                filters:     [null]
+            'alarms':
+                kafka_topic: 'voltha.alarms'
+                filters:     [null]
+            'kpis':
+                kafka_topic: 'voltha.kpis'
+                filters:     [null]
+
diff --git a/adapters/ponsim_onu/VERSION b/adapters/ponsim_onu/VERSION
new file mode 100644
index 0000000..c0ab82c
--- /dev/null
+++ b/adapters/ponsim_onu/VERSION
@@ -0,0 +1 @@
+0.0.1-dev
diff --git a/adapters/ponsim_onu/__init__.py b/adapters/ponsim_onu/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/ponsim_onu/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/ponsim_onu/main.py b/adapters/ponsim_onu/main.py
new file mode 100755
index 0000000..63e2bc4
--- /dev/null
+++ b/adapters/ponsim_onu/main.py
@@ -0,0 +1,485 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Ponsim ONU Adapter main entry point"""
+
+import argparse
+import arrow
+import os
+import time
+
+import yaml
+from simplejson import dumps
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.task import LoopingCall
+from zope.interface import implementer
+from adapters.protos import third_party
+from adapters.common.structlog_setup import setup_logging, update_logging
+from adapters.common.utils.dockerhelpers import get_my_containers_name
+from adapters.common.utils.nethelpers import get_my_primary_local_ipv4, \
+    get_my_primary_interface
+from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from adapters.common.utils.registry import registry, IComponent
+from packaging.version import Version
+from adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, get_messaging_proxy
+from adapters.ponsim_onu.ponsim_onu import PonSimOnuAdapter
+from adapters.protos.adapter_pb2 import AdapterConfig, Adapter
+from adapters.kafka.adapter_request_facade import AdapterRequestFacade
+from adapters.kafka.core_proxy import CoreProxy
+from adapters.common.utils.deferred_utils import TimeOutError
+from adapters.common.utils.asleep import asleep
+
+_ = third_party
+
+defs = dict(
+    version_file='./VERSION',
+    config=os.environ.get('CONFIG', './ponsim_onu.yml'),
+    container_name_regex=os.environ.get('CONTAINER_NUMBER_EXTRACTOR', '^.*\.(['
+                                                                      '0-9]+)\..*$'),
+    consul=os.environ.get('CONSUL', 'localhost:8500'),
+    name=os.environ.get('NAME', 'ponsim_onu'),
+    vendor=os.environ.get('VENDOR', 'Voltha Project'),
+    device_type=os.environ.get('DEVICE_TYPE', 'ponsim_onu'),
+    accept_bulk_flow=os.environ.get('ACCEPT_BULK_FLOW', True),
+    accept_atomic_flow=os.environ.get('ACCEPT_ATOMIC_FLOW', True),
+    etcd=os.environ.get('ETCD', 'localhost:2379'),
+    core_topic=os.environ.get('CORE_TOPIC', 'rwcore'),
+    interface=os.environ.get('INTERFACE', get_my_primary_interface()),
+    instance_id=os.environ.get('INSTANCE_ID', os.environ.get('HOSTNAME', '1')),
+    kafka_adapter=os.environ.get('KAFKA_ADAPTER', '192.168.0.20:9092'),
+    kafka_cluster=os.environ.get('KAFKA_CLUSTER', '10.100.198.220:9092'),
+    backend=os.environ.get('BACKEND', 'none'),
+    retry_interval=os.environ.get('RETRY_INTERVAL', 2),
+    heartbeat_topic=os.environ.get('HEARTBEAT_TOPIC', "adapters.heartbeat"),
+)
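+# Any default above can be overridden through its corresponding environment
+# variable, e.g. CORE_TOPIC=rwcore CONFIG=./ponsim_onu.yml ./main.py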
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    _help = ('Path to ponsim_onu.yml config file (default: %s). '
+             'If relative, it is relative to main.py of ponsim adapter.'
+             % defs['config'])
+    parser.add_argument('-c', '--config',
+                        dest='config',
+                        action='store',
+                        default=defs['config'],
+                        help=_help)
+
+    _help = 'Regular expression for extracting container number from ' \
+            'container name (default: %s)' % defs['container_name_regex']
+    parser.add_argument('-X', '--container-number-extractor',
+                        dest='container_name_regex',
+                        action='store',
+                        default=defs['container_name_regex'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to consul agent (default: %s)' % defs['consul']
+    parser.add_argument('-C', '--consul',
+                        dest='consul',
+                        action='store',
+                        default=defs['consul'],
+                        help=_help)
+
+    _help = 'name of this adapter (default: %s)' % defs['name']
+    parser.add_argument('-na', '--name',
+                        dest='name',
+                        action='store',
+                        default=defs['name'],
+                        help=_help)
+
+    _help = 'vendor of this adapter (default: %s)' % defs['vendor']
+    parser.add_argument('-ven', '--vendor',
+                        dest='vendor',
+                        action='store',
+                        default=defs['vendor'],
+                        help=_help)
+
+    _help = 'supported device type of this adapter (default: %s)' % defs[
+        'device_type']
+    parser.add_argument('-dt', '--device_type',
+                        dest='device_type',
+                        action='store',
+                        default=defs['device_type'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts bulk flow updates ' \
+            '(default: %s)' % defs['accept_bulk_flow']
+    parser.add_argument('-abf', '--accept_bulk_flow',
+                        dest='accept_bulk_flow',
+                        action='store',
+                        default=defs['accept_bulk_flow'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts add/remove flow ' \
+            'updates (default: %s)' % defs['accept_atomic_flow']
+    parser.add_argument('-aaf', '--accept_atomic_flow',
+                        dest='accept_atomic_flow',
+                        action='store',
+                        default=defs['accept_atomic_flow'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to etcd server (default: %s)' % defs['etcd']
+    parser.add_argument('-e', '--etcd',
+                        dest='etcd',
+                        action='store',
+                        default=defs['etcd'],
+                        help=_help)
+
+    _help = ('unique string id of this container instance (default: %s)'
+             % defs['instance_id'])
+    parser.add_argument('-i', '--instance-id',
+                        dest='instance_id',
+                        action='store',
+                        default=defs['instance_id'],
+                        help=_help)
+
+    _help = 'ETH interface to receive on (default: %s)' % defs['interface']
+    parser.add_argument('-I', '--interface',
+                        dest='interface',
+                        action='store',
+                        default=defs['interface'],
+                        help=_help)
+
+    _help = 'omit startup banner log lines'
+    parser.add_argument('-n', '--no-banner',
+                        dest='no_banner',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = 'do not emit periodic heartbeat log messages'
+    parser.add_argument('-N', '--no-heartbeat',
+                        dest='no_heartbeat',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = "suppress debug and info logs"
+    parser.add_argument('-q', '--quiet',
+                        dest='quiet',
+                        action='count',
+                        help=_help)
+
+    _help = 'enable verbose logging'
+    parser.add_argument('-v', '--verbose',
+                        dest='verbose',
+                        action='count',
+                        help=_help)
+
+    _help = ('use docker container name as container instance id'
+             ' (overrides -i/--instance-id option)')
+    parser.add_argument('--instance-id-is-container-name',
+                        dest='instance_id_is_container_name',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka adapter broker (default: %s). ('
+             'If not '
+             'specified (None), the address from the config file is used'
+             % defs['kafka_adapter'])
+    parser.add_argument('-KA', '--kafka_adapter',
+                        dest='kafka_adapter',
+                        action='store',
+                        default=defs['kafka_adapter'],
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka cluster broker (default: %s). ('
+             'If not '
+             'specified (None), the address from the config file is used'
+             % defs['kafka_cluster'])
+    parser.add_argument('-KC', '--kafka_cluster',
+                        dest='kafka_cluster',
+                        action='store',
+                        default=defs['kafka_cluster'],
+                        help=_help)
+
+    _help = 'backend to use for config persistence'
+    parser.add_argument('-b', '--backend',
+                        default=defs['backend'],
+                        choices=['none', 'consul', 'etcd'],
+                        help=_help)
+
+    _help = 'topic of core on the kafka bus'
+    parser.add_argument('-ct', '--core_topic',
+                        dest='core_topic',
+                        action='store',
+                        default=defs['core_topic'],
+                        help=_help)
+
+    args = parser.parse_args()
+
+    # post-processing
+
+    if args.instance_id_is_container_name:
+        args.instance_id = get_my_containers_name()
+
+    return args
+
+
+def load_config(args):
+    path = args.config
+    if path.startswith('.'):
+        base_dir = os.path.dirname(os.path.abspath(__file__))
+        path = os.path.join(base_dir, path)
+    path = os.path.abspath(path)
+    with open(path) as fd:
+        config = yaml.load(fd)
+    return config
+
+
+def print_banner(log):
+    log.info(' ____                 _              ___  _   _ _   _    ')
+    log.info('|  _ \ ___  _ __  ___(_)_ __ ___    / _ \| \ | | | | |   ')
+    log.info('| |_) / _ \| \'_ \/ __| | \'_ ` _ \  | | | |  \| | | | | ')
+    log.info('|  __/ (_) | | | \__ \ | | | | | | | |_| | |\  | |_| |   ')
+    log.info('|_|   \___/|_| |_|___/_|_| |_| |_|  \___/|_| \_|\___/    ')
+    log.info('    _       _             _                            ')
+    log.info('   / \   __| | __ _ _ __ | |_ ___ _ __                 ')
+    log.info('  / _ \ / _` |/ _` | \'_ \| __/ _ \ \'__|              ')
+    log.info(' / ___ \ (_| | (_| | |_) | ||  __/ |                   ')
+    log.info('/_/   \_\__,_|\__,_| .__/ \__\___|_|                   ')
+    log.info('                   |_|                                 ')
+    log.info('(to stop: press Ctrl-C)')
+
+
+@implementer(IComponent)
+class Main(object):
+
+    def __init__(self):
+
+        self.args = args = parse_args()
+        self.config = load_config(args)
+
+        verbosity_adjust = (args.verbose or 0) - (args.quiet or 0)
+        self.log = setup_logging(self.config.get('logging', {}),
+                                 args.instance_id,
+                                 verbosity_adjust=verbosity_adjust)
+        self.log.info('container-number-extractor',
+                      regex=args.container_name_regex)
+
+        self.ponsim_onu_adapter_version = self.get_version()
+        self.log.info('Ponsim-ONU-Adapter-Version',
+                      version=self.ponsim_onu_adapter_version)
+
+        if not args.no_banner:
+            print_banner(self.log)
+
+        # Create a unique instance id using the passed-in instance id and
+        # UTC timestamp
+        current_time = arrow.utcnow().timestamp
+        self.instance_id = self.args.instance_id + '_' + str(current_time)
+
+        self.core_topic = args.core_topic
+        self.listening_topic = args.name
+        self.startup_components()
+
+        if not args.no_heartbeat:
+            self.start_heartbeat()
+            self.start_kafka_cluster_heartbeat(self.instance_id)
+
+    def get_version(self):
+        path = defs['version_file']
+        if not path.startswith('/'):
+            base_dir = os.path.dirname(os.path.abspath(__file__))
+            path = os.path.join(base_dir, path)
+
+        path = os.path.abspath(path)
+        version_file = open(path, 'r')
+        v = version_file.read()
+
+        # Use Version to validate the version string - exception will be raised
+        # if the version is invalid
+        Version(v)
+
+        version_file.close()
+        return v
+
+    def start(self):
+        self.start_reactor()  # will not return except Keyboard interrupt
+
+    def stop(self):
+        pass
+
+    def get_args(self):
+        """Allow access to command line args"""
+        return self.args
+
+    def get_config(self):
+        """Allow access to content of config file"""
+        return self.config
+
+    def _get_adapter_config(self):
+        cfg = AdapterConfig()
+        return cfg
+
+    @inlineCallbacks
+    def startup_components(self):
+        try:
+            self.log.info('starting-internal-components',
+                          consul=self.args.consul,
+                          etcd=self.args.etcd)
+
+            registry.register('main', self)
+
+            # Update the logger to output the vcore id.
+            self.log = update_logging(instance_id=self.instance_id,
+                                      vcore_id=None)
+
+            yield registry.register(
+                'kafka_cluster_proxy',
+                KafkaProxy(
+                    self.args.consul,
+                    self.args.kafka_cluster,
+                    config=self.config.get('kafka-cluster-proxy', {})
+                )
+            ).start()
+
+            config = self._get_adapter_config()
+
+            self.core_proxy = CoreProxy(
+                kafka_proxy=None,
+                core_topic=self.core_topic,
+                my_listening_topic=self.listening_topic)
+
+            ponsim_onu_adapter = PonSimOnuAdapter(
+                adapter_agent=self.core_proxy, config=config)
+            ponsim_request_handler = AdapterRequestFacade(
+                adapter=ponsim_onu_adapter)
+
+            yield registry.register(
+                'kafka_adapter_proxy',
+                IKafkaMessagingProxy(
+                    kafka_host_port=self.args.kafka_adapter,
+                    # TODO: Add KV Store object reference
+                    kv_store=self.args.backend,
+                    default_topic=self.args.name,
+                    target_cls=ponsim_request_handler
+                )
+            ).start()
+
+            self.core_proxy.kafka_proxy = get_messaging_proxy()
+
+            # retry forever
+            res = yield self._register_with_core(-1)
+
+            self.log.info('started-internal-services')
+
+        except Exception as e:
+            self.log.exception('Failure-to-start-all-components', e=e)
+
+    @inlineCallbacks
+    def shutdown_components(self):
+        """Execute before the reactor is shut down"""
+        self.log.info('exiting-on-keyboard-interrupt')
+        for component in reversed(registry.iterate()):
+            yield component.stop()
+
+        import threading
+        self.log.info('THREADS:')
+        main_thread = threading.current_thread()
+        for t in threading.enumerate():
+            if t is main_thread:
+                continue
+            if not t.isDaemon():
+                continue
+            self.log.info('joining thread {} {}'.format(
+                t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
+            t.join()
+
+    def start_reactor(self):
+        from twisted.internet import reactor
+        reactor.callWhenRunning(
+            lambda: self.log.info('twisted-reactor-started'))
+        reactor.addSystemEventTrigger('before', 'shutdown',
+                                      self.shutdown_components)
+        reactor.run()
+
+    @inlineCallbacks
+    def _register_with_core(self, retries):
+        # Send registration to Core with adapter specs
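+        # A negative 'retries' value means retry forever; otherwise give up
+        # and re-raise after the given number of timeouts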
+        adapter = Adapter()
+        adapter.id = self.args.name
+        adapter.vendor = self.args.name
+        adapter.version = self.ponsim_onu_adapter_version
+        while True:
+            try:
+                resp = yield self.core_proxy.register(adapter)
+                self.log.info('registration-response', response=resp)
+                returnValue(resp)
+            except TimeOutError as e:
+                self.log.warn("timeout-when-registering-with-core", e=e)
+                if retries == 0:
+                    self.log.exception("no-more-retries", e=e)
+                    raise
+                else:
+                    retries = retries if retries < 0 else retries - 1
+                    yield asleep(defs['retry_interval'])
+            except Exception as e:
+                self.log.exception("failed-registration", e=e)
+                raise
+
+    def start_heartbeat(self):
+
+        t0 = time.time()
+        t0s = time.ctime(t0)
+
+        def heartbeat():
+            self.log.debug(status='up', since=t0s, uptime=time.time() - t0)
+
+        lc = LoopingCall(heartbeat)
+        lc.start(10)
+
+    # Temporary function to send a heartbeat message to the external kafka
+    # broker
+    def start_kafka_cluster_heartbeat(self, instance_id):
+        # For heartbeat we will send a message to the configured heartbeat
+        # topic.  The message is a JSON-encoded dict (see send_msg below)
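+        # Example payload (illustrative values):
+        #   {"type": "heartbeat", "adapter": "ponsim_onu", "instance": "...",
+        #    "ip": "172.17.0.5", "ts": 1531000000, "uptime": 42.0}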
+        message = dict(
+            type='heartbeat',
+            adapter=self.args.name,
+            instance=instance_id,
+            ip=get_my_primary_local_ipv4()
+        )
+        topic = defs['heartbeat_topic']
+
+        def send_msg(start_time):
+            try:
+                kafka_cluster_proxy = get_kafka_proxy()
+                if kafka_cluster_proxy and not kafka_cluster_proxy.is_faulty():
+                    # self.log.debug('kafka-proxy-available')
+                    message['ts'] = arrow.utcnow().timestamp
+                    message['uptime'] = time.time() - start_time
+                    # self.log.debug('start-kafka-heartbeat')
+                    kafka_cluster_proxy.send_message(topic, dumps(message))
+                else:
+                    self.log.error('kafka-proxy-unavailable')
+            except Exception as e:
+                self.log.exception('failed-sending-message-heartbeat', e=e)
+
+        try:
+            t0 = time.time()
+            lc = LoopingCall(send_msg, t0)
+            lc.start(10)
+        except Exception as e:
+            self.log.exception('failed-kafka-heartbeat', e=e)
+
+
+if __name__ == '__main__':
+    Main().start()
diff --git a/adapters/ponsim_onu/ponsim_onu.py b/adapters/ponsim_onu/ponsim_onu.py
new file mode 100644
index 0000000..7e35c7f
--- /dev/null
+++ b/adapters/ponsim_onu/ponsim_onu.py
@@ -0,0 +1,373 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Fully simulated OLT/ONU adapter.
+"""
+
+import sys
+import structlog
+from twisted.internet.defer import DeferredQueue, inlineCallbacks
+from adapters.common.utils.asleep import asleep
+
+from adapters.iadapter import OnuAdapter
+from adapters.protos import third_party
+from adapters.protos.common_pb2 import OperStatus, ConnectStatus, AdminState
+from adapters.protos.device_pb2 import Port
+from adapters.protos.logical_device_pb2 import LogicalPort
+from adapters.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+    OFPPF_1GB_FD
+from adapters.protos.openflow_13_pb2 import ofp_port
+from adapters.protos.ponsim_pb2 import FlowTable
+from adapters.protos.core_adapter_pb2 import PortCapability
+
+_ = third_party
+log = structlog.get_logger()
+
+
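+# Convert a MAC address string into a tuple of ints, e.g.
+# '00:00:00:00:00:0a' -> (0, 0, 0, 0, 0, 10)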
+def mac_str_to_tuple(mac):
+    return tuple(int(d, 16) for d in mac.split(':'))
+
+class PonSimOnuAdapter(OnuAdapter):
+    def __init__(self, adapter_agent, config):
+        # The DeviceType of the ONU should match the vendor ID portion of
+        # the ONU serial number, as specified by the standard; this is
+        # required to identify the correct adapter for a ranged ONU
+        super(PonSimOnuAdapter, self).__init__(adapter_agent=adapter_agent,
+                                               config=config,
+                                               device_handler_class=PonSimOnuHandler,
+                                               name='ponsim_onu',
+                                               vendor='Voltha project',
+                                               version='0.4',
+                                               device_type='ponsim_onu',
+                                               vendor_id='PSMO',
+                                               accepts_bulk_flow_update=True,
+                                               accepts_add_remove_flow_updates=False)
+
+
+class PonSimOnuHandler(object):
+    def __init__(self, adapter, device_id):
+        self.adapter = adapter
+        self.adapter_agent = adapter.adapter_agent
+        self.device_id = device_id
+        self.log = structlog.get_logger(device_id=device_id)
+        self.incoming_messages = DeferredQueue()
+        self.proxy_address = None
+        # reference of uni_port is required when re-enabling the device if
+        # it was disabled previously
+        self.uni_port = None
+        self.pon_port = None
+
+    def receive_message(self, msg):
+        self.incoming_messages.put(msg)
+
+
+    @inlineCallbacks
+    def activate(self, device):
+        self.log.info('activating')
+
+        # TODO:  Register for proxy address
+        # # first we verify that we got parent reference and proxy info
+        # assert device.parent_id
+        # assert device.proxy_address.device_id
+        # assert device.proxy_address.channel_id
+        #
+        # # register for proxied messages right away
+        # self.proxy_address = device.proxy_address
+        # self.adapter_agent.register_for_proxied_messages(device.proxy_address)
+
+        # populate device info
+        device.root = False
+        device.vendor = 'ponsim'
+        device.model = 'n/a'
+        device.connect_status = ConnectStatus.REACHABLE
+        yield self.adapter_agent.device_update(device)
+
+        # register physical ports
+        self.uni_port = Port(
+            port_no=2,
+            label='UNI facing Ethernet port',
+            type=Port.ETHERNET_UNI,
+            admin_state=AdminState.ENABLED,
+            oper_status=OperStatus.ACTIVE
+        )
+        self.pon_port = Port(
+            port_no=1,
+            label='PON port',
+            type=Port.PON_ONU,
+            admin_state=AdminState.ENABLED,
+            oper_status=OperStatus.ACTIVE,
+            peers=[
+                Port.PeerPort(
+                    device_id=device.parent_id,
+                    port_no=device.parent_port_no
+                )
+            ]
+        )
+        self.adapter_agent.port_created(device.id, self.uni_port)
+        self.adapter_agent.port_created(device.id, self.pon_port)
+
+        yield self.adapter_agent.device_state_update(device.id, oper_status=OperStatus.ACTIVE)
+
+
+    def get_ofp_port_info(self, device, port_no):
+        # Since the adapter created the device port, it already holds the
+        # port reference needed to return the capability.   TODO:  Do a
+        # lookup on the UNI port number and return the appropriate attributes
+        self.log.info('get_ofp_port_info', port_no=port_no, device_id=device.id)
+        # port_no = device.proxy_address.channel_id
+        cap = OFPPF_1GB_FD | OFPPF_FIBER
+        return PortCapability(
+            port=LogicalPort(
+                id='uni-{}'.format(port_no),
+                ofp_port=ofp_port(
+                    port_no=port_no,
+                    hw_addr=mac_str_to_tuple('00:00:00:00:00:%02x' % port_no),
+                    name='uni-{}'.format(port_no),
+                    config=0,
+                    state=OFPPS_LIVE,
+                    curr=cap,
+                    advertised=cap,
+                    peer=cap,
+                    curr_speed=OFPPF_1GB_FD,
+                    max_speed=OFPPF_1GB_FD
+                )
+            )
+        )
+
+    def _get_uni_port(self):
+        ports = self.adapter_agent.get_ports(self.device_id, Port.ETHERNET_UNI)
+        if ports:
+            # For now, we use only one UNI port
+            return ports[0]
+
+    def _get_pon_port(self):
+        ports = self.adapter_agent.get_ports(self.device_id, Port.PON_ONU)
+        if ports:
+            # For now, we use only one PON port
+            return ports[0]
+
+    def reconcile(self, device):
+        self.log.info('reconciling-ONU-device-starts')
+
+        # first we verify that we got parent reference and proxy info
+        assert device.parent_id
+        assert device.proxy_address.device_id
+        assert device.proxy_address.channel_id
+
+        # register for proxied messages right away
+        self.proxy_address = device.proxy_address
+        self.adapter_agent.register_for_proxied_messages(device.proxy_address)
+
+        # Set the connection status to REACHABLE
+        device.connect_status = ConnectStatus.REACHABLE
+        self.adapter_agent.update_device(device)
+
+        # TODO: Verify that the uni, pon and logical ports exists
+
+        # Mark the device as REACHABLE and ACTIVE
+        device = self.adapter_agent.get_device(device.id)
+        device.connect_status = ConnectStatus.REACHABLE
+        device.oper_status = OperStatus.ACTIVE
+        self.adapter_agent.update_device(device)
+
+        self.log.info('reconciling-ONU-device-ends')
+
+    @inlineCallbacks
+    def update_flow_table(self, flows):
+
+        # we need to proxy through the OLT to get to the ONU
+
+        # reset response queue
+        while self.incoming_messages.pending:
+            yield self.incoming_messages.get()
+
+        msg = FlowTable(
+            port=self.proxy_address.channel_id,
+            flows=flows
+        )
+        self.adapter_agent.send_proxied_message(self.proxy_address, msg)
+
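+        # block until the OLT adapter relays the ONU's response back to us
+        # via receive_message()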
+        yield self.incoming_messages.get()
+
+    def remove_from_flow_table(self, flows):
+        self.log.debug('remove-from-flow-table', flows=flows)
+        # TODO: Update PONSIM code to accept incremental flow changes.
+        # Once completed, the accepts_add_remove_flow_updates for this
+        # device type can be set to True
+
+    def add_to_flow_table(self, flows):
+        self.log.debug('add-to-flow-table', flows=flows)
+        # TODO: Update PONSIM code to accept incremental flow changes
+        # Once completed, the accepts_add_remove_flow_updates for this
+        # device type can be set to True
+
+    @inlineCallbacks
+    def reboot(self):
+        self.log.info('rebooting', device_id=self.device_id)
+
+        # Update the operational status to ACTIVATING and connect status to
+        # UNREACHABLE
+        device = self.adapter_agent.get_device(self.device_id)
+        previous_oper_status = device.oper_status
+        previous_conn_status = device.connect_status
+        device.oper_status = OperStatus.ACTIVATING
+        device.connect_status = ConnectStatus.UNREACHABLE
+        self.adapter_agent.update_device(device)
+
+        # Sleep 10 secs, simulating a reboot
+        # TODO: send alert and clear alert after the reboot
+        yield asleep(10)
+
+        # Change the operational status back to its previous state.  With a
+        # real OLT the operational state should be the state the device is
+        # after a reboot.
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+        device.oper_status = previous_oper_status
+        device.connect_status = previous_conn_status
+        self.adapter_agent.update_device(device)
+        self.log.info('rebooted', device_id=self.device_id)
+
+    def self_test_device(self, device):
+        """
+        This is called to Self a device based on a NBI call.
+        :param device: A Voltha.Device object.
+        :return: Will return result of self test
+        """
+        log.info('self-test-device', device=device.id)
+        raise NotImplementedError()
+
+    def disable(self):
+        self.log.info('disabling', device_id=self.device_id)
+
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+
+        # Disable all ports on that device
+        self.adapter_agent.disable_all_ports(self.device_id)
+
+        # Update the device operational status to UNKNOWN
+        device.oper_status = OperStatus.UNKNOWN
+        device.connect_status = ConnectStatus.UNREACHABLE
+        self.adapter_agent.update_device(device)
+
+        # Remove the uni logical port from the OLT, if still present
+        parent_device = self.adapter_agent.get_device(device.parent_id)
+        assert parent_device
+        logical_device_id = parent_device.parent_id
+        assert logical_device_id
+        port_no = device.proxy_address.channel_id
+        port_id = 'uni-{}'.format(port_no)
+        try:
+            port = self.adapter_agent.get_logical_port(logical_device_id,
+                                                       port_id)
+            self.adapter_agent.delete_logical_port(logical_device_id, port)
+        except KeyError:
+            self.log.info('logical-port-not-found', device_id=self.device_id,
+                          portid=port_id)
+
+        # Remove pon port from parent
+        self.pon_port = self._get_pon_port()
+        self.adapter_agent.delete_port_reference_from_parent(self.device_id,
+                                                             self.pon_port)
+
+        # Just updating the port status may be an option as well
+        # port.ofp_port.config = OFPPC_NO_RECV
+        # yield self.adapter_agent.update_logical_port(logical_device_id,
+        #                                             port)
+        # Unregister for proxied message
+        self.adapter_agent.unregister_for_proxied_messages(
+            device.proxy_address)
+
+        # TODO:
+        # 1) Remove all flows from the device
+        # 2) Remove the device from ponsim
+
+        self.log.info('disabled', device_id=device.id)
+
+    def reenable(self):
+        self.log.info('re-enabling', device_id=self.device_id)
+        try:
+            # Get the latest device reference
+            device = self.adapter_agent.get_device(self.device_id)
+
+            # First we verify that we got parent reference and proxy info
+            assert device.parent_id
+            assert device.proxy_address.device_id
+            assert device.proxy_address.channel_id
+
+            # Re-register for proxied messages right away
+            self.proxy_address = device.proxy_address
+            self.adapter_agent.register_for_proxied_messages(
+                device.proxy_address)
+
+            # Re-enable the ports on that device
+            self.adapter_agent.enable_all_ports(self.device_id)
+
+            # Refresh the port reference
+            self.uni_port = self._get_uni_port()
+            self.pon_port = self._get_pon_port()
+
+            # Add the pon port reference to the parent
+            self.adapter_agent.add_port_reference_to_parent(device.id,
+                                                            self.pon_port)
+
+            # Update the connect status to REACHABLE
+            device.connect_status = ConnectStatus.REACHABLE
+            self.adapter_agent.update_device(device)
+
+            # re-add uni port to logical device
+            parent_device = self.adapter_agent.get_device(device.parent_id)
+            logical_device_id = parent_device.parent_id
+            assert logical_device_id
+            port_no = device.proxy_address.channel_id
+            cap = OFPPF_1GB_FD | OFPPF_FIBER
+            self.adapter_agent.add_logical_port(logical_device_id, LogicalPort(
+                id='uni-{}'.format(port_no),
+                ofp_port=ofp_port(
+                    port_no=port_no,
+                    hw_addr=mac_str_to_tuple('00:00:00:00:00:%02x' % port_no),
+                    name='uni-{}'.format(port_no),
+                    config=0,
+                    state=OFPPS_LIVE,
+                    curr=cap,
+                    advertised=cap,
+                    peer=cap,
+                    curr_speed=OFPPF_1GB_FD,
+                    max_speed=OFPPF_1GB_FD
+                ),
+                device_id=device.id,
+                device_port_no=self.uni_port.port_no
+            ))
+
+            device = self.adapter_agent.get_device(device.id)
+            device.oper_status = OperStatus.ACTIVE
+            self.adapter_agent.update_device(device)
+
+            self.log.info('re-enabled', device_id=device.id)
+        except Exception as e:
+            self.log.exception('error-reenabling', e=e)
+
+    def delete(self):
+        self.log.info('deleting', device_id=self.device_id)
+
+        # A delete request may be received when an OLT is disabled
+
+        # TODO:
+        # 1) Remove all flows from the device
+        # 2) Remove the device from ponsim
+
+        self.log.info('deleted', device_id=self.device_id)
diff --git a/adapters/ponsim_onu/ponsim_onu.yml b/adapters/ponsim_onu/ponsim_onu.yml
new file mode 100644
index 0000000..1afafdf
--- /dev/null
+++ b/adapters/ponsim_onu/ponsim_onu.yml
@@ -0,0 +1,52 @@
+logging:
+    version: 1
+
+    formatters:
+      brief:
+        format: '%(message)s'
+      default:
+        format: '%(asctime)s.%(msecs)03d %(levelname)-8s %(threadName)s %(module)s.%(funcName)s %(message)s'
+        datefmt: '%Y%m%dT%H%M%S'
+
+    handlers:
+        console:
+            class : logging.StreamHandler
+            level: DEBUG
+            formatter: default
+            stream: ext://sys.stdout
+        localRotatingFile:
+            class: logging.handlers.RotatingFileHandler
+            filename: ponsim_onu.log
+            formatter: default
+            maxBytes: 2097152
+            backupCount: 10
+            level: DEBUG
+        null:
+            class: logging.NullHandler
+
+    loggers:
+        amqp:
+            handlers: [null]
+            propagate: False
+        conf:
+            propagate: False
+        '': # root logger
+            handlers: [console, localRotatingFile]
+            level: DEBUG # this can be bumped up/down by -q and -v command line
+                        # options
+            propagate: False
+
+
+kafka-cluster-proxy:
+    event_bus_publisher:
+        topic_mappings:
+            'model-change-events':
+                kafka_topic: 'voltha.events'
+                filters:     [null]
+            'alarms':
+                kafka_topic: 'voltha.alarms'
+                filters:     [null]
+            'kpis':
+                kafka_topic: 'voltha.kpis'
+                filters:     [null]
+
diff --git a/adapters/protos/Makefile b/adapters/protos/Makefile
new file mode 100644
index 0000000..0fad970
--- /dev/null
+++ b/adapters/protos/Makefile
@@ -0,0 +1,101 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Makefile to build all protobuf and gRPC related artifacts
+
+default: third_party build
+
+PROTO_FILES := $(wildcard ../../protos/*.proto)
+PROTO_GOOGLE_API := $(wildcard third_party/google/api/*.proto)
+PROTO_ALL_FILES := $(PROTO_FILES) $(PROTO_GOOGLE_API)
+PROTO_PB2_FILES := $(foreach f,$(PROTO_FILES),$(subst .proto,_pb2.py,$(f)))
+PROTO_PB2_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,_pb2.py,$(f)))
+PROTO_PB2_GRPC_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,_pb2_grpc.py,$(f)))
+PROTO_DESC_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,.desc,$(f)))
+
+PROTOC_PREFIX := /usr/local
+PROTOC_LIBDIR := $(PROTOC_PREFIX)/lib
+
+PROTOC := $(PROTOC_PREFIX)/bin/protoc
+
+PROTOC_VERSION := "3.3.0"
+PROTOC_DOWNLOAD_PREFIX := "https://github.com/google/protobuf/releases/download"
+PROTOC_DIR := protobuf-$(PROTOC_VERSION)
+PROTOC_TARBALL := protobuf-python-$(PROTOC_VERSION).tar.gz
+PROTOC_DOWNLOAD_URI := $(PROTOC_DOWNLOAD_PREFIX)/v$(PROTOC_VERSION)/$(PROTOC_TARBALL)
+PROTOC_BUILD_TMP_DIR := "/tmp/protobuf-build-$(shell uname -s | tr '[:upper:]' '[:lower:]')"
+
+# Google API needs to be built from within the third party directory
+#
+third_party: google_api
+google_api:
+	@echo "Building protocol buffer artifacts from third_party google api"
+	cd third_party ; \
+	env LD_LIBRARY_PATH=$(PROTOC_LIBDIR) python -m grpc.tools.protoc \
+	    -I. \
+	    --python_out=. \
+	    --grpc_python_out=. \
+	    --descriptor_set_out=google/api/annotations.desc \
+	    --include_imports \
+	    --include_source_info \
+        google/api/annotations.proto google/api/http.proto
+
+build: $(PROTOC) $(PROTO_PB2_FILES)
+
+%_pb2.py: %.proto Makefile
+	@echo "Building protocol buffer artifacts from $<"
+	env LD_LIBRARY_PATH=$(PROTOC_LIBDIR) python -m grpc.tools.protoc \
+	    -I../../protos \
+	    -I./third_party \
+	    --python_out=. \
+	    --grpc_python_out=. \
+	    --descriptor_set_out=./$(basename $(notdir $<)).desc \
+	    --include_imports \
+	    --include_source_info \
+	    $<
+
+clean:
+	rm -f *.desc *_pb2* \
+		$(PROTO_PB2_GOOGLE_API) \
+		$(PROTO_PB2_GRPC_GOOGLE_API)\
+		$(PROTO_DESC_GOOGLE_API)
+
+$(PROTOC):
+	@echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+	@echo "It looks like you don't have protocol buffer tools installed."
+	@echo "To install the protocol buffer toolchain, you can run:"
+	@echo "    make install-protoc"
+	@echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+
+install-protoc: $(PROTOC)
+	@echo "Downloading and installing protocol buffer support."
+	@echo "Installation will require sudo privileges"
+	@echo "This will take a few minutes."
+	mkdir -p $(PROTOC_BUILD_TMP_DIR)
+	@echo "We ask for sudo credentials now so we can install at the end"; \
+	sudo echo "Thanks"; \
+	    cd $(PROTOC_BUILD_TMP_DIR); \
+	    wget $(PROTOC_DOWNLOAD_URI); \
+	    tar xzvf $(PROTOC_TARBALL); \
+	    cd $(PROTOC_DIR); \
+	    ./configure --prefix=$(PROTOC_PREFIX); \
+	    make; \
+	    sudo make install
+
+uninstall-protoc:
+	cd $(PROTOC_BUILD_TMP_DIR)/$(PROTOC_DIR); \
+	    sudo make uninstall
+
diff --git a/adapters/protos/__init__.py b/adapters/protos/__init__.py
new file mode 100644
index 0000000..cfcdc97
--- /dev/null
+++ b/adapters/protos/__init__.py
@@ -0,0 +1,15 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
\ No newline at end of file
diff --git a/adapters/protos/third_party/__init__.py b/adapters/protos/third_party/__init__.py
new file mode 100644
index 0000000..2740afe
--- /dev/null
+++ b/adapters/protos/third_party/__init__.py
@@ -0,0 +1,53 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+This helps with loading http_pb2 and annotations_pb2.
+Without this, the Python importer will not be able to process the lines:
+from google.api import http_pb2 or
+from google.api import annotations_pb2
+(Without importing these, the protobuf loader will not recognize http options
+in the protobuf definitions.)
+"""
+
+from importlib import import_module
+import os
+import sys
+
+
+class GoogleApiImporter(object):
+
+    def find_module(self, full_name, path=None):
+        if full_name == 'google.api':
+            self.path = [os.path.dirname(__file__)]
+            return self
+
+    def load_module(self, name):
+        if name in sys.modules:
+            return sys.modules[name]
+        full_name = 'adapters.protos.third_party.' + name
+        import_module(full_name)
+        module = sys.modules[full_name]
+        sys.modules[name] = module
+        return module
+
+
+sys.meta_path.append(GoogleApiImporter())
+try:
+    from google.api import http_pb2, annotations_pb2
+    _ = http_pb2, annotations_pb2
+except AssertionError:
+    pass
diff --git a/adapters/protos/third_party/google/LICENSE b/adapters/protos/third_party/google/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/adapters/protos/third_party/google/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/adapters/protos/third_party/google/__init__.py b/adapters/protos/third_party/google/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/protos/third_party/google/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/protos/third_party/google/api/__init__.py b/adapters/protos/third_party/google/api/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/protos/third_party/google/api/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/protos/third_party/google/api/annotations.proto b/adapters/protos/third_party/google/api/annotations.proto
new file mode 100644
index 0000000..cbd18b8
--- /dev/null
+++ b/adapters/protos/third_party/google/api/annotations.proto
@@ -0,0 +1,29 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/http.proto";
+import "google/protobuf/descriptor.proto";
+
+option java_multiple_files = true;
+option java_outer_classname = "AnnotationsProto";
+option java_package = "com.google.api";
+
+extend google.protobuf.MethodOptions {
+  // See `HttpRule`.
+  HttpRule http = 72295728;
+}
diff --git a/adapters/protos/third_party/google/api/http.proto b/adapters/protos/third_party/google/api/http.proto
new file mode 100644
index 0000000..ce07aa1
--- /dev/null
+++ b/adapters/protos/third_party/google/api/http.proto
@@ -0,0 +1,127 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option java_multiple_files = true;
+option java_outer_classname = "HttpProto";
+option java_package = "com.google.api";
+
+
+// `HttpRule` defines the mapping of an RPC method to one or more HTTP REST API
+// methods. The mapping determines what portions of the request message are
+// populated from the path, query parameters, or body of the HTTP request.  The
+// mapping is typically specified as an `google.api.http` annotation, see
+// "google/api/annotations.proto" for details.
+//
+// The mapping consists of a mandatory field specifying a path template and an
+// optional `body` field specifying what data is represented in the HTTP request
+// body. The field name for the path indicates the HTTP method. Example:
+//
+// ```
+// package google.storage.v2;
+//
+// import "google/api/annotations.proto";
+//
+// service Storage {
+//   rpc CreateObject(CreateObjectRequest) returns (Object) {
+//     option (google.api.http) {
+//       post: "/v2/{bucket_name=buckets/*}/objects"
+//       body: "object"
+//     };
+//   };
+// }
+// ```
+//
+// Here `bucket_name` and `object` bind to fields of the request message
+// `CreateObjectRequest`.
+//
+// The rules for mapping HTTP path, query parameters, and body fields
+// to the request message are as follows:
+//
+// 1. The `body` field specifies either `*` or a field path, or is
+//    omitted. If omitted, it assumes there is no HTTP body.
+// 2. Leaf fields (recursive expansion of nested messages in the
+//    request) can be classified into three types:
+//     (a) Matched in the URL template.
+//     (b) Covered by body (if body is `*`, everything except (a) fields;
+//         else everything under the body field)
+//     (c) All other fields.
+// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
+// 4. Any body sent with an HTTP request can contain only (b) fields.
+//
+// The syntax of the path template is as follows:
+//
+//     Template = "/" Segments [ Verb ] ;
+//     Segments = Segment { "/" Segment } ;
+//     Segment  = "*" | "**" | LITERAL | Variable ;
+//     Variable = "{" FieldPath [ "=" Segments ] "}" ;
+//     FieldPath = IDENT { "." IDENT } ;
+//     Verb     = ":" LITERAL ;
+//
+// `*` matches a single path component, `**` zero or more path components, and
+// `LITERAL` a constant.  A `Variable` can match an entire path as specified
+// again by a template; this nested template must not contain further variables.
+// If no template is given with a variable, it matches a single path component.
+// The notation `{var}` is henceforth equivalent to `{var=*}`.
+//
+// Use CustomHttpPattern to specify any HTTP method that is not included in the
+// pattern field, such as HEAD, or "*" to leave the HTTP method unspecified for
+// a given URL path rule. The wild-card rule is useful for services that provide
+// content to Web (HTML) clients.
+message HttpRule {
+
+  // Determines the URL pattern is matched by this rules. This pattern can be
+  // used with any of the {get|put|post|delete|patch} methods. A custom method
+  // can be defined using the 'custom' field.
+  oneof pattern {
+    // Used for listing and getting information about resources.
+    string get = 2;
+
+    // Used for updating a resource.
+    string put = 3;
+
+    // Used for creating a resource.
+    string post = 4;
+
+    // Used for deleting a resource.
+    string delete = 5;
+
+    // Used for updating a resource.
+    string patch = 6;
+
+    // Custom pattern is used for defining custom verbs.
+    CustomHttpPattern custom = 8;
+  }
+
+  // The name of the request field whose value is mapped to the HTTP body, or
+  // `*` for mapping all fields not captured by the path pattern to the HTTP
+  // body.
+  string body = 7;
+
+  // Additional HTTP bindings for the selector. Nested bindings must not
+  // specify a selector and must not contain additional bindings.
+  repeated HttpRule additional_bindings = 11;
+}
+
+// A custom pattern is used for defining custom HTTP verb.
+message CustomHttpPattern {
+  // The name of this custom HTTP verb.
+  string kind = 1;
+
+  // The path matched by this custom verb.
+  string path = 2;
+}
diff --git a/adapters/requirements.txt b/adapters/requirements.txt
new file mode 100755
index 0000000..a0641b2
--- /dev/null
+++ b/adapters/requirements.txt
@@ -0,0 +1,68 @@
+argparse==1.2.1
+arrow==0.10.0
+bitstring==3.1.5
+cmd2==0.7.0
+colorama==0.3.9
+cython==0.24.1
+decorator==4.1.2
+docker-py==1.10.6
+fluent-logger==0.6.0
+grpc==0.3.post19
+grpcio==1.3.5
+grpcio-tools==1.3.5
+hash_ring==1.3.1
+hexdump==3.3
+jinja2==2.8
+jsonpatch==1.16
+kafka_python==1.3.5
+klein==17.10.0
+kubernetes==5.0.0
+netaddr==0.7.19
+networkx==2.0
+nose==1.3.7
+nose-exclude==0.5.0
+nose-testconfig==0.10
+mock==2.0.0
+netifaces==0.10.6
+pcapy==0.11.1
+pep8==1.7.1
+pep8-naming>=0.3.3
+protobuf==3.3.0
+protobuf-to-dict==0.1.0
+pyflakes==1.6.0
+pylint==1.7.6
+#pypcap>=1.1.5
+pyOpenSSL==17.3.0
+PyYAML==3.12
+requests==2.18.4
+scapy==2.3.3
+service-identity==17.0.0
+simplejson==3.12.0
+jsonschema==2.6.0
+six==1.11.0
+structlog==17.2.0
+termcolor==1.1.0
+transitions==0.6.4
+treq==17.8.0
+Twisted==17.9.0
+txaioetcd==0.3.0
+urllib3==1.22
+pyang==1.7.3
+lxml==3.6.4
+nosexcover==1.0.11
+zmq==0.0.0
+pyzmq==16.0.3
+txZMQ==0.8.0
+ncclient==0.5.3
+xmltodict==0.11.0
+dicttoxml==1.7.4
+etcd3==0.7.0
+pyparsing==2.2.0
+packaging==17.1
+
+# python-consul>=0.6.1  we need the pre-release version for now, because 0.6.1 does not
+# yet support Twisted. Once released, it will be the 0.6.2 version
+git+https://github.com/cablehead/python-consul.git
+
+# Twisted Python kafka client
+git+https://github.com/ciena/afkak.git
diff --git a/common/core/northbound/grpc/default_api_handler.go b/common/core/northbound/grpc/default_api_handler.go
index 8fa31d9..45e8f36 100644
--- a/common/core/northbound/grpc/default_api_handler.go
+++ b/common/core/northbound/grpc/default_api_handler.go
@@ -28,6 +28,10 @@
 type DefaultAPIHandler struct {
 }
 
+func init() {
+	log.AddPackage(log.JSON, log.WarnLevel, nil)
+}
+
 func NewDefaultAPIHandler() *DefaultAPIHandler {
 	handler := &DefaultAPIHandler{}
 	return handler
diff --git a/common/log/log.go b/common/log/log.go
index 38a051d..e86ba0d 100644
--- a/common/log/log.go
+++ b/common/log/log.go
@@ -19,6 +19,7 @@
 	"fmt"
 	zp "go.uber.org/zap"
 	zc "go.uber.org/zap/zapcore"
+	"path"
 	"runtime"
 	"strings"
 )
@@ -79,6 +80,10 @@
 type Fields map[string]interface{}
 
 var defaultLogger *logger
+var cfg zp.Config
+
+var loggers map[string]*logger
+var cfgs map[string]zp.Config
 
 type logger struct {
 	log    *zp.SugaredLogger
@@ -125,12 +130,9 @@
 	}
 }
 
-var cfg zp.Config
-
 // SetDefaultLogger needs to be invoked before the logger API can be used.  This function
 // initializes the default logger (zap's sugaredlogger)
-func SetLogger(outputType string, level int, defaultFields Fields) (Logger, error) {
-
+func SetDefaultLogger(outputType string, level int, defaultFields Fields) (Logger, error) {
 	// Build a custom config using zap
 	cfg = getDefaultConfig(outputType, level, defaultFields)
 
@@ -147,28 +149,127 @@
 	return defaultLogger, nil
 }
 
-func SetLoglevel(level int) {
-	switch level {
-	case DebugLevel:
-		cfg.Level.SetLevel(zc.DebugLevel)
-	case InfoLevel:
-		cfg.Level.SetLevel(zc.InfoLevel)
-	case WarnLevel:
-		cfg.Level.SetLevel(zc.WarnLevel)
-	case ErrorLevel:
-		cfg.Level.SetLevel(zc.ErrorLevel)
-	case PanicLevel:
-		cfg.Level.SetLevel(zc.PanicLevel)
-	case FatalLevel:
-		cfg.Level.SetLevel(zc.FatalLevel)
-	default:
-		cfg.Level.SetLevel(zc.ErrorLevel)
+func SetPackageLevelLoggers(outputType string, level int, defaultFields Fields, pkgNames []string) error {
+	cfgs = make(map[string]zp.Config)
+	loggers = make(map[string]*logger)
+	for _, pkg := range pkgNames {
+		// Build a custom config using zap - for now all packages are initialized with the same config
+		cfgs[pkg] = getDefaultConfig(outputType, level, defaultFields)
+
+		l, err := cfgs[pkg].Build()
+		if err != nil {
+			return err
+		}
+
+		loggers[pkg] = &logger{
+			log:    l.Sugar(),
+			parent: l,
+		}
+	}
+
+	return nil
+}
+
+func AddPackage(outputType string, level int, defaultFields Fields) error {
+	if cfgs == nil {
+		cfgs = make(map[string]zp.Config)
+	}
+	if loggers == nil {
+		loggers = make(map[string]*logger)
+	}
+	pkgName, _, _, _ := getCallerInfo()
+
+	if _, exist := cfgs[pkgName]; exist {
+		return nil
+	}
+	cfgs[pkgName] = getDefaultConfig(outputType, level, defaultFields)
+
+	l, err := cfgs[pkgName].Build()
+	if err != nil {
+		return err
+	}
+
+	loggers[pkgName] = &logger{
+		log:    l.Sugar(),
+		parent: l,
+	}
+	return nil
+}
+
+func UpdateAllLoggers(defaultFields Fields) error {
+	for pkgName, cfg := range cfgs {
+		for k, v := range defaultFields {
+			if cfg.InitialFields == nil {
+				cfg.InitialFields = make(map[string]interface{})
+			}
+			cfg.InitialFields[k] = v
+		}
+		l, err := cfg.Build()
+		if err != nil {
+			return err
+		}
+
+		loggers[pkgName] = &logger{
+			log:    l.Sugar(),
+			parent: l,
+		}
+	}
+	return nil
+}
+
+//func SetDefaultLoglevel(level int) {
+//	switch level {
+//	case DebugLevel:
+//		cfg.Level.SetLevel(zc.DebugLevel)
+//	case InfoLevel:
+//		cfg.Level.SetLevel(zc.InfoLevel)
+//	case WarnLevel:
+//		cfg.Level.SetLevel(zc.WarnLevel)
+//	case ErrorLevel:
+//		cfg.Level.SetLevel(zc.ErrorLevel)
+//	case PanicLevel:
+//		cfg.Level.SetLevel(zc.PanicLevel)
+//	case FatalLevel:
+//		cfg.Level.SetLevel(zc.FatalLevel)
+//	default:
+//		cfg.Level.SetLevel(zc.ErrorLevel)
+//	}
+//}
+
+func SetPackageLogLevel(packageName string, level int) {
+	// Get proper config
+	if cfg, ok := cfgs[packageName]; ok {
+		switch level {
+		case DebugLevel:
+			cfg.Level.SetLevel(zc.DebugLevel)
+		case InfoLevel:
+			cfg.Level.SetLevel(zc.InfoLevel)
+		case WarnLevel:
+			cfg.Level.SetLevel(zc.WarnLevel)
+		case ErrorLevel:
+			cfg.Level.SetLevel(zc.ErrorLevel)
+		case PanicLevel:
+			cfg.Level.SetLevel(zc.PanicLevel)
+		case FatalLevel:
+			cfg.Level.SetLevel(zc.FatalLevel)
+		default:
+			cfg.Level.SetLevel(zc.ErrorLevel)
+		}
 	}
 }
 
 // CleanUp flushes any buffered log entries. Applications should take care to call
 // CleanUp before exiting.
 func CleanUp() error {
+	for _, logger := range loggers {
+		if logger != nil {
+			if logger.parent != nil {
+				if err := logger.parent.Sync(); err != nil {
+					return err
+				}
+			}
+		}
+	}
 	if defaultLogger != nil {
 		if defaultLogger.parent != nil {
 			if err := defaultLogger.parent.Sync(); err != nil {
@@ -181,58 +282,87 @@
 
 // GetLogger returns the default logger.  If SetDefaultLogger was not previously invoked then
 // this method will return an error
-func GetLogger() (Logger, error) {
-	if defaultLogger == nil {
-		// Setup the logger with default values - debug level,
-		SetLogger(JSON, 0, Fields{"instanceId": "default-logger"})
-		//return nil, errors.New("Uninitialized-logger")
-	}
-	return defaultLogger, nil
-}
+//func GetLogger() (Logger, error) {
+//	if defaultLogger == nil {
+//		// Setup the logger with default values - debug level,
+//		SetDefaultLogger(JSON, 0, Fields{"instanceId": "default-logger"})
+//		//return nil, errors.New("Uninitialized-logger")
+//	}
+//	return defaultLogger, nil
+//}
 
-func extractFileNameAndLineNumber(skipLevel int) (string, int) {
-	_, file, line, ok := runtime.Caller(skipLevel)
-	var key string
-	if !ok {
-		key = "<???>"
-		line = 1
-	} else {
-		slash := strings.LastIndex(file, "/")
-		key = file[slash+1:]
-	}
-	return key, line
-}
+//func extractFileNameAndLineNumber(skipLevel int) (string, int) {
+//	_, file, line, ok := runtime.Caller(skipLevel)
+//	var key string
+//	if !ok {
+//		key = "<???>"
+//		line = 1
+//	} else {
+//		slash := strings.LastIndex(file, "/")
+//		key = file[slash+1:]
+//	}
+//	return key, line
+//}
 
 // sourced adds a source field to the logger that contains
 // the file name and line where the logging happened.
-func (l *logger) sourced() *zp.SugaredLogger {
-	key, line := extractFileNameAndLineNumber(3)
-	if strings.HasSuffix(key, "log.go") || strings.HasSuffix(key, "proc.go") {
-		// Go to a lower level
-		key, line = extractFileNameAndLineNumber(2)
-	}
-	if !strings.HasSuffix(key, ".go") {
-		// Go to a higher level
-		key, line = extractFileNameAndLineNumber(4)
+//func (l *logger) sourced() *zp.SugaredLogger {
+//	key, line := extractFileNameAndLineNumber(3)
+//	if strings.HasSuffix(key, "log.go") || strings.HasSuffix(key, "proc.go") {
+//		// Go to a lower level
+//		key, line = extractFileNameAndLineNumber(2)
+//	}
+//	if !strings.HasSuffix(key, ".go") {
+//		// Go to a higher level
+//		key, line = extractFileNameAndLineNumber(4)
+//	}
+//
+//	return l.log.With("caller", fmt.Sprintf("%s:%d", key, line))
+//}
+
+func retrieveCallInfo(skiplevel int) (string, string, string, int) {
+	pc, file, line, _ := runtime.Caller(skiplevel)
+	_, fileName := path.Split(file)
+	parts := strings.Split(runtime.FuncForPC(pc).Name(), ".")
+	pl := len(parts)
+	packageName := ""
+	funcName := parts[pl-1]
+
+	if parts[pl-2][0] == '(' {
+		//funcName = parts[pl-2] + "." + funcName
+		packageName = strings.Join(parts[0:pl-2], ".")
+	} else {
+		packageName = strings.Join(parts[0:pl-1], ".")
 	}
 
-	return l.log.With("caller", fmt.Sprintf("%s:%d", key, line))
+	return packageName, fileName, funcName, line
 }
 
-//func serializeMap(fields Fields) []interface{} {
-//	data := make([]interface{}, len(fields)*2+2)
-//	i := 0
-//	for k, v := range fields {
-//		data[i] = k
-//		data[i+1] = v
-//		i = i + 2
-//	}
-//	key, line := extractFileNameAndLineNumber(3)
-//	data[i] = "caller"
-//	data[i+1] = fmt.Sprintf("%s:%d", key, line)
-//
-//	return data
-//}
+func getCallerInfo() (string, string, string, int) {
+	packageName, fileName, funcName, line := retrieveCallInfo(3)
+
+	if strings.HasSuffix(funcName, "log.go") || strings.HasSuffix(funcName, "proc.go") || strings.HasSuffix(packageName, ".init") {
+		// Go to a lower level
+		packageName, fileName, funcName, line = retrieveCallInfo(2)
+	}
+	if !strings.HasSuffix(funcName, ".go") {
+		// Go to a higher level
+		packageName, fileName, funcName, line = retrieveCallInfo(4)
+	}
+
+	if strings.HasSuffix(fileName, ".go") {
+		fileName = strings.TrimSuffix(fileName, ".go")
+	}
+	return packageName, fileName, funcName, line
+}
+
+func getPackageLevelLogger() *zp.SugaredLogger {
+	pkgName, fileName, funcName, line := getCallerInfo()
+	if _, exist := loggers[pkgName]; exist {
+		return loggers[pkgName].log.With("caller", fmt.Sprintf("%s.%s:%d", fileName, funcName, line))
+	}
+	return defaultLogger.log.With("caller", fmt.Sprintf("%s.%s:%d", fileName, funcName, line))
+}
 
 func serializeMap(fields Fields) []interface{} {
 	data := make([]interface{}, len(fields)*2)
@@ -359,110 +489,110 @@
 
 // With returns a logger initialized with the key-value pairs
 func With(keysAndValues Fields) Logger {
-	return logger{log: defaultLogger.sourced().With(serializeMap(keysAndValues)...), parent: defaultLogger.parent}
+	return logger{log: getPackageLevelLogger().With(serializeMap(keysAndValues)...), parent: defaultLogger.parent}
 }
 
 // Debug logs a message at level Debug on the standard logger.
 func Debug(args ...interface{}) {
-	defaultLogger.sourced().Debug(args...)
+	getPackageLevelLogger().Debug(args...)
 }
 
 // Debugln logs a message at level Debug on the standard logger.
 func Debugln(args ...interface{}) {
-	defaultLogger.sourced().Debug(args...)
+	getPackageLevelLogger().Debug(args...)
 }
 
 // Debugf logs a message at level Debug on the standard logger.
 func Debugf(format string, args ...interface{}) {
-	defaultLogger.sourced().Debugf(format, args...)
+	getPackageLevelLogger().Debugf(format, args...)
 }
 
 // Debugw logs a message with some additional context. The variadic key-value
 // pairs are treated as they are in With.
 func Debugw(msg string, keysAndValues Fields) {
-	defaultLogger.sourced().Debugw(msg, serializeMap(keysAndValues)...)
+	getPackageLevelLogger().Debugw(msg, serializeMap(keysAndValues)...)
 }
 
 // Info logs a message at level Info on the standard logger.
 func Info(args ...interface{}) {
-	defaultLogger.sourced().Info(args...)
+	getPackageLevelLogger().Info(args...)
 }
 
 // Infoln logs a message at level Info on the standard logger.
 func Infoln(args ...interface{}) {
-	defaultLogger.sourced().Info(args...)
+	getPackageLevelLogger().Info(args...)
 }
 
 // Infof logs a message at level Info on the standard logger.
 func Infof(format string, args ...interface{}) {
-	defaultLogger.sourced().Infof(format, args...)
+	getPackageLevelLogger().Infof(format, args...)
 }
 
 //Infow logs a message with some additional context. The variadic key-value
 //pairs are treated as they are in With.
 func Infow(msg string, keysAndValues Fields) {
-	defaultLogger.sourced().Infow(msg, serializeMap(keysAndValues)...)
+	getPackageLevelLogger().Infow(msg, serializeMap(keysAndValues)...)
 }
 
 // Warn logs a message at level Warn on the standard logger.
 func Warn(args ...interface{}) {
-	defaultLogger.sourced().Warn(args...)
+	getPackageLevelLogger().Warn(args...)
 }
 
 // Warnln logs a message at level Warn on the standard logger.
 func Warnln(args ...interface{}) {
-	defaultLogger.sourced().Warn(args...)
+	getPackageLevelLogger().Warn(args...)
 }
 
 // Warnf logs a message at level Warn on the standard logger.
 func Warnf(format string, args ...interface{}) {
-	defaultLogger.sourced().Warnf(format, args...)
+	getPackageLevelLogger().Warnf(format, args...)
 }
 
 // Warnw logs a message with some additional context. The variadic key-value
 // pairs are treated as they are in With.
 func Warnw(msg string, keysAndValues Fields) {
-	defaultLogger.sourced().Warnw(msg, serializeMap(keysAndValues)...)
+	getPackageLevelLogger().Warnw(msg, serializeMap(keysAndValues)...)
 }
 
 // Error logs a message at level Error on the standard logger.
 func Error(args ...interface{}) {
-	defaultLogger.sourced().Error(args...)
+	getPackageLevelLogger().Error(args...)
 }
 
 // Errorln logs a message at level Error on the standard logger.
 func Errorln(args ...interface{}) {
-	defaultLogger.sourced().Error(args...)
+	getPackageLevelLogger().Error(args...)
 }
 
 // Errorf logs a message at level Error on the standard logger.
 func Errorf(format string, args ...interface{}) {
-	defaultLogger.sourced().Errorf(format, args...)
+	getPackageLevelLogger().Errorf(format, args...)
 }
 
 // Errorw logs a message with some additional context. The variadic key-value
 // pairs are treated as they are in With.
 func Errorw(msg string, keysAndValues Fields) {
-	defaultLogger.sourced().Errorw(msg, serializeMap(keysAndValues)...)
+	getPackageLevelLogger().Errorw(msg, serializeMap(keysAndValues)...)
 }
 
 // Fatal logs a message at level Fatal on the standard logger.
 func Fatal(args ...interface{}) {
-	defaultLogger.sourced().Fatal(args...)
+	getPackageLevelLogger().Fatal(args...)
 }
 
 // Fatalln logs a message at level Fatal on the standard logger.
 func Fatalln(args ...interface{}) {
-	defaultLogger.sourced().Fatal(args...)
+	getPackageLevelLogger().Fatal(args...)
 }
 
 // Fatalf logs a message at level Fatal on the standard logger.
 func Fatalf(format string, args ...interface{}) {
-	defaultLogger.sourced().Fatalf(format, args...)
+	getPackageLevelLogger().Fatalf(format, args...)
 }
 
 // Fatalw logs a message with some additional context. The variadic key-value
 // pairs are treated as they are in With.
 func Fatalw(msg string, keysAndValues Fields) {
-	defaultLogger.sourced().Fatalw(msg, serializeMap(keysAndValues)...)
+	getPackageLevelLogger().Fatalw(msg, serializeMap(keysAndValues)...)
 }
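
Taken together, the revised log API is consumed as follows: a package registers itself once from init() (AddPackage derives the package name from the caller, so none is passed explicitly), logs through the usual package-level functions, and can have its verbosity changed at runtime via SetPackageLogLevel, e.g. from the gRPC log-level API mentioned in the commit message. A minimal sketch of a consuming package, using only the functions added above (the package path and doWork are illustrative, not part of this change):

    package example

    import "github.com/opencord/voltha-go/common/log"

    func init() {
        // Register this package with the log module; a per-package
        // config and logger are created on first registration.
        log.AddPackage(log.JSON, log.WarnLevel, nil)
    }

    func doWork() {
        // Routed through this package's logger; suppressed while the
        // package level is WarnLevel.
        log.Debugw("doing-work", log.Fields{"step": 1})
    }

    // Elsewhere, e.g. behind the gRPC API, verbosity can be raised for
    // this package alone; the key is the package path as derived by
    // getCallerInfo:
    //     log.SetPackageLogLevel("github.com/opencord/voltha-go/example", log.DebugLevel)
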
diff --git a/compose/docker-compose-zk-kafka-test.yml b/compose/docker-compose-zk-kafka-test.yml
index 2cad1b8..30eea0a 100644
--- a/compose/docker-compose-zk-kafka-test.yml
+++ b/compose/docker-compose-zk-kafka-test.yml
@@ -13,11 +13,12 @@
   # Single-node kafka service
   #
   kafka:
-    image: "wurstmeister/kafka:latest"
+    image: "wurstmeister/kafka:1.1.0"
     ports:
      - 9092:9092
     environment:
-      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.100.198.220:9092
+#      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.100.198.220:9092
+      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.176.230.190:9092
       KAFKA_LISTENERS: PLAINTEXT://:9092
 #      KAFKA_ADVERTISED_HOST_NAME: 10.100.198.220
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
diff --git a/compose/ponsim-adapters.yml b/compose/ponsim-adapters.yml
new file mode 100644
index 0000000..fff3730
--- /dev/null
+++ b/compose/ponsim-adapters.yml
@@ -0,0 +1,41 @@
+version: '2'
+services:
+  ponsim_olt_adapter:
+    image: "${REGISTRY}${REPOSITORY}voltha-ponsim-adapter-olt${TAG}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    command: [
+      "/adapters/adapters/ponsim_olt/main.py",
+      "-v",
+      "--name=ponsim_olt",
+      "--kafka_adapter=10.176.230.190:9092",
+      "--kafka_cluster=10.176.230.190:9092",
+      "--core_topic=rwcore"
+    ]
+    networks:
+    - default
+
+  ponsim_onu_adapter:
+    image: "${REGISTRY}${REPOSITORY}voltha-ponsim-adapter-onu${TAG}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    command: [
+      "/adapters/adapters/ponsim_onu/main.py",
+      "-v",
+      "--name=ponsim_onu",
+      "--kafka_adapter=10.176.230.190:9092",
+      "--kafka_cluster=10.176.230.190:9092",
+      "--core_topic=rwcore"
+    ]
+    networks:
+    - default
+
+networks:
+  default:
+    driver: bridge
\ No newline at end of file
diff --git a/compose/ponsim.yml b/compose/ponsim.yml
new file mode 100644
index 0000000..fde48df
--- /dev/null
+++ b/compose/ponsim.yml
@@ -0,0 +1,57 @@
+version: '2'
+services:
+  ponsim_olt:
+    image: "${REGISTRY}${REPOSITORY}voltha-ponsim${TAG}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    entrypoint:
+      - /app/ponsim
+      - -device_type
+      - "OLT"
+      - -onus
+      - "4"
+      - -internal_if
+      - "eth0"
+      - -external_if
+      - "eth0"
+      - -vcore_endpoint
+      - "vcore"
+      - -promiscuous
+      - -verbose
+    ports:
+      - "50060:50060"
+    networks:
+    - default
+
+  ponsim_onu:
+    image: "${REGISTRY}${REPOSITORY}voltha-ponsim${TAG}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    entrypoint:
+      - /app/ponsim
+      - -device_type
+      - "ONU"
+      - -parent_addr
+      - "ponsim_olt"
+      - -grpc_port
+      - "50061"
+      - -internal_if
+      - "eth0"
+      - -external_if
+      - "lo"
+      - -promiscuous
+      - -verbose
+    ports:
+    - "50061:50061"
+    networks:
+    - default
+
+networks:
+  default:
+    driver: bridge
\ No newline at end of file
diff --git a/compose/ponsim_olt.yml b/compose/ponsim_olt.yml
new file mode 100644
index 0000000..be3f61e
--- /dev/null
+++ b/compose/ponsim_olt.yml
@@ -0,0 +1,31 @@
+version: '2'
+services:
+  ponsim_olt:
+    image: "${REGISTRY}${REPOSITORY}voltha-ponsim${TAG}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    entrypoint:
+      - /app/ponsim
+      - -device_type
+      - "OLT"
+      - -onus
+      - "4"
+      - -internal_if
+      - "eth0"
+      - -external_if
+      - "eth0"
+      - -vcore_endpoint
+      - "vcore"
+      - -promiscuous
+      - -verbose
+    ports:
+      - "50060:50060"
+    networks:
+    - default
+
+networks:
+  default:
+    driver: bridge
\ No newline at end of file
diff --git a/compose/ponsim_onu.yml b/compose/ponsim_onu.yml
new file mode 100644
index 0000000..7462f11
--- /dev/null
+++ b/compose/ponsim_onu.yml
@@ -0,0 +1,31 @@
+version: '2'
+services:
+  ponsim_onu:
+    image: "${REGISTRY}${REPOSITORY}voltha-ponsim${TAG}"
+    logging:
+      driver: "json-file"
+      options:
+        max-size: "10m"
+        max-file: "3"
+    entrypoint:
+      - /app/ponsim
+      - -device_type
+      - "ONU"
+      - -parent_addr
+      - "ponsim_olt"
+      - -grpc_port
+      - "50061"
+      - -internal_if
+      - "eth0"
+      - -external_if
+      - "lo"
+      - -promiscuous
+      - -verbose
+    ports:
+    - "50061:50061"
+    networks:
+    - default
+
+networks:
+  default:
+    driver: bridge
\ No newline at end of file
diff --git a/compose/rw_core.yml b/compose/rw_core.yml
index a11f4a9..5df0147 100644
--- a/compose/rw_core.yml
+++ b/compose/rw_core.yml
@@ -10,6 +10,12 @@
         - -grpc_host=${DOCKER_HOST_IP}
         - -grpc_port=50057
         - -banner=true
+        - -kafka_adapter_host=10.176.230.190
+        - -kafka_adapter_port=9092
+        - -kafka_cluster_host=10.176.230.190
+        - -kafka_cluster_port=9092
+        - -rw_core_topic=rwcore
+        - -log_level=0
     ports:
       - "50057:50057"
     volumes:
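
The added rw_core arguments wire the Core to the Kafka broker used by the adapters and set the initial log level (0 = debug, per the log module above). How rw_core registers these is outside this hunk; a sketch assuming Go's standard flag package, with flag names mirroring the compose arguments and illustrative defaults:

    package main

    import "flag"

    func main() {
        kafkaAdapterHost := flag.String("kafka_adapter_host", "127.0.0.1", "Kafka broker host for adapter messaging")
        kafkaAdapterPort := flag.Int("kafka_adapter_port", 9092, "Kafka broker port for adapter messaging")
        rwCoreTopic := flag.String("rw_core_topic", "rwcore", "Kafka topic the Core listens on")
        logLevel := flag.Int("log_level", 0, "initial log level (0 = debug)")
        flag.Parse()
        // These values would then feed the Core's KafkaMessagingProxy and
        // log setup.
        _, _, _, _ = kafkaAdapterHost, kafkaAdapterPort, rwCoreTopic, logLevel
    }
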
diff --git a/db/kvstore/client.go b/db/kvstore/client.go
index 8b1c914..a7cbf2b 100644
--- a/db/kvstore/client.go
+++ b/db/kvstore/client.go
@@ -15,6 +15,10 @@
  */
 package kvstore
 
+import (
+	"github.com/opencord/voltha-go/common/log"
+)
+
 const (
 	// Default timeout in seconds when making a kvstore request
 	defaultKVGetTimeout = 5
@@ -38,6 +42,10 @@
 	Lease   int64
 }
 
+func init() {
+	log.AddPackage(log.JSON, log.WarnLevel, nil)
+}
+
 // NewKVPair creates a new KVPair object
 func NewKVPair(key string, value interface{}, session string, lease int64) *KVPair {
 	kv := new(KVPair)
diff --git a/db/kvstore/etcdclient.go b/db/kvstore/etcdclient.go
index a6a1433..490a477 100644
--- a/db/kvstore/etcdclient.go
+++ b/db/kvstore/etcdclient.go
@@ -20,9 +20,11 @@
 	"context"
 	"errors"
 	"fmt"
-	v3Client "github.com/coreos/etcd/clientv3"
-	v3rpcTypes "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	//v3Client "github.com/coreos/etcd/clientv3"
+	//v3rpcTypes "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
 	log "github.com/opencord/voltha-go/common/log"
+	v3Client "go.etcd.io/etcd/clientv3"
+	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
 	"sync"
 )
 
diff --git a/db/model/backend.go b/db/model/backend.go
index 340dd26..cd92c0b 100644
--- a/db/model/backend.go
+++ b/db/model/backend.go
@@ -18,10 +18,10 @@
 import (
 	"errors"
 	"fmt"
+	"github.com/opencord/voltha-go/common/log"
 	"github.com/opencord/voltha-go/db/kvstore"
 	"strconv"
 	"time"
-	"github.com/opencord/voltha-go/common/log"
 )
 
 //TODO: missing cache stuff
diff --git a/db/model/non_persisted_revision.go b/db/model/non_persisted_revision.go
index 4a97941..06bb53d 100644
--- a/db/model/non_persisted_revision.go
+++ b/db/model/non_persisted_revision.go
@@ -19,9 +19,9 @@
 	"bytes"
 	"crypto/md5"
 	"fmt"
+	"github.com/opencord/voltha-go/common/log"
 	"reflect"
 	"sort"
-	"github.com/opencord/voltha-go/common/log"
 )
 
 var (
diff --git a/db/model/persisted_revision.go b/db/model/persisted_revision.go
index 805557a..ece0bb0 100644
--- a/db/model/persisted_revision.go
+++ b/db/model/persisted_revision.go
@@ -186,7 +186,7 @@
 	newPR := &PersistedRevision{
 		Revision: newNPR,
 		Compress: pr.Compress,
-		kvStore: pr.kvStore,
+		kvStore:  pr.kvStore,
 	}
 
 	newPR.Finalize()
@@ -200,7 +200,7 @@
 	newPR := &PersistedRevision{
 		Revision: newNPR,
 		Compress: pr.Compress,
-		kvStore: pr.kvStore,
+		kvStore:  pr.kvStore,
 	}
 
 	newPR.Finalize()
@@ -214,7 +214,7 @@
 	newPR := &PersistedRevision{
 		Revision: newNPR,
 		Compress: pr.Compress,
-		kvStore: pr.kvStore,
+		kvStore:  pr.kvStore,
 	}
 
 	newPR.Finalize()
diff --git a/db/model/profiling.go b/db/model/profiling.go
index 9d13d5a..1f97839 100644
--- a/db/model/profiling.go
+++ b/db/model/profiling.go
@@ -16,8 +16,8 @@
 package model
 
 import (
-	"sync"
 	"github.com/opencord/voltha-go/common/log"
+	"sync"
 )
 
 type profiling struct {
diff --git a/db/model/proxy.go b/db/model/proxy.go
index 4b7a59f..4aae7f4 100644
--- a/db/model/proxy.go
+++ b/db/model/proxy.go
@@ -16,13 +16,13 @@
 package model
 
 import (
-	"fmt"
-	"strings"
-	"reflect"
 	"crypto/md5"
-	"github.com/opencord/voltha-go/common/log"
 	"errors"
+	"fmt"
+	"github.com/opencord/voltha-go/common/log"
+	"reflect"
 	"runtime"
+	"strings"
 )
 
 type OperationContext struct {
diff --git a/db/model/proxy_test.go b/db/model/proxy_test.go
index 0984016..ad02eb6 100644
--- a/db/model/proxy_test.go
+++ b/db/model/proxy_test.go
@@ -16,15 +16,15 @@
 package model
 
 import (
-	"github.com/opencord/voltha-go/protos/voltha"
-	"testing"
-	"github.com/opencord/voltha-go/common/log"
-	"strconv"
-	"reflect"
-	"github.com/google/uuid"
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/protos/voltha"
+	"reflect"
+	"strconv"
+	"testing"
 )
 
 type proxyTest struct {
diff --git a/db/model/root.go b/db/model/root.go
index b4885c1..1c14f9a 100644
--- a/db/model/root.go
+++ b/db/model/root.go
@@ -20,9 +20,9 @@
 	"encoding/json"
 	"fmt"
 	"github.com/google/uuid"
+	"github.com/opencord/voltha-go/common/log"
 	"reflect"
 	"time"
-	"github.com/opencord/voltha-go/common/log"
 )
 
 type Root struct {
@@ -222,7 +222,7 @@
 }
 
 type rootData struct {
-	Latest string `json:latest`
+	Latest string            `json:"latest"`
-	Tags   map[string]string `json:tags`
+	Tags   map[string]string `json:"tags"`
 }
 
diff --git a/db/model/transaction_test.go b/db/model/transaction_test.go
index 4a0cc29..064b9ef 100644
--- a/db/model/transaction_test.go
+++ b/db/model/transaction_test.go
@@ -16,24 +16,24 @@
 package model
 
 import (
-	"github.com/opencord/voltha-go/protos/voltha"
-	"github.com/opencord/voltha-go/common/log"
-	"testing"
-	"github.com/google/uuid"
 	"encoding/hex"
-	"strconv"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/protos/voltha"
 	"reflect"
+	"strconv"
+	"testing"
 )
 
 type transactionTest struct {
-	Root        *Root
-	Backend     *Backend
-	Proxy       *Proxy
-	DbPrefix    string
-	DbType      string
-	DbHost      string
-	DbPort      int
-	DbTimeout   int
+	Root      *Root
+	Backend   *Backend
+	Proxy     *Proxy
+	DbPrefix  string
+	DbType    string
+	DbHost    string
+	DbPort    int
+	DbTimeout int
 }
 
 var (
@@ -46,7 +46,7 @@
 		DbTimeout: 5,
 	}
 	txTargetDevId string
-	txDevId string
+	txDevId       string
 )
 
 func init() {
@@ -148,7 +148,7 @@
 
 func Test_Transaction_5_RemoveDevice(t *testing.T) {
 	removeTx := tx.Proxy.openTransaction()
-	if removed := removeTx.Remove("/devices/"+txDevId); removed == nil {
+	if removed := removeTx.Remove("/devices/" + txDevId); removed == nil {
 		t.Error("Failed to remove device")
 	} else {
 		t.Logf("Removed device : %+v", removed)
diff --git a/db/model/utils_test.go b/db/model/utils_test.go
index 644e6e2..c4cc60d 100644
--- a/db/model/utils_test.go
+++ b/db/model/utils_test.go
@@ -16,17 +16,17 @@
 package model
 
 import (
-	"testing"
 	"github.com/opencord/voltha-go/protos/voltha"
 	"reflect"
+	"testing"
 )
 
 func Test_Utils_Clone(t *testing.T) {
 	a := &voltha.Device{
-		Id: "abcde",
+		Id:              "abcde",
 		FirmwareVersion: "someversion",
 	}
-	b:= &voltha.Device{}
+	b := &voltha.Device{}
 	Clone(reflect.ValueOf(a).Interface(), b)
 	t.Logf("A: %+v, B: %+v", a, b)
 	b.Id = "12345"
diff --git a/docker/Dockerfile.rw_core b/docker/Dockerfile.rw_core
index 0c6b79c..c2a78a3 100644
--- a/docker/Dockerfile.rw_core
+++ b/docker/Dockerfile.rw_core
@@ -1,7 +1,7 @@
 # -------------
 # Build stage
 
-FROM golang:alpine AS build-env
+FROM golang:1.9.2-alpine AS build-env
 
 # Install required packages
 RUN apk add --no-cache wget git make build-base protobuf protobuf-dev
@@ -32,7 +32,7 @@
 RUN sh /src/protos/build_protos.sh /src/protos
 
 # Build rw_core
-RUN cd $GOPATH/src/github.com/opencord/voltha-go/rw_core && go get -d ./... && rm -rf $GOPATH/src/github.com/coreos/etcd/vendor/golang.org/x/net/trace && go build -o /src/rw_core
+RUN cd $GOPATH/src/github.com/opencord/voltha-go/rw_core && go get -d ./... && rm -rf $GOPATH/src/go.etcd.io/etcd/vendor/golang.org/x/net/trace && go build -o /src/rw_core
 
 # -------------
 # Image creation stage
diff --git a/kafka/kafka_inter_container_library.go b/kafka/kafka_inter_container_library.go
index 00086e3..c00fb60 100644
--- a/kafka/kafka_inter_container_library.go
+++ b/kafka/kafka_inter_container_library.go
@@ -33,7 +33,7 @@
 
 // Initialize the logger - gets the default until the main function setup the logger
 func init() {
-	log.GetLogger()
+	log.AddPackage(log.JSON, log.WarnLevel, nil)
 }
 
 const (
@@ -49,7 +49,7 @@
 	DefaultReturnErrors      = true
 	DefaultConsumerMaxwait   = 50
 	DefaultMaxProcessingTime = 100
-	DefaultRequestTimeout    = 50 // 50 milliseconds
+	DefaultRequestTimeout    = 200 // 200 milliseconds - to handle a wider latency range
 )
 
 type consumerChannels struct {
@@ -66,6 +66,7 @@
 	producer                      sarama.AsyncProducer
 	consumer                      sarama.Consumer
 	doneCh                        chan int
+	waitForResponseRoutineStarted bool
 	topicToConsumerChannelMap     map[string]*consumerChannels
 	transactionIdToChannelMap     map[string]chan *ca.InterContainerMessage
 	lockTopicToConsumerChannelMap sync.RWMutex
@@ -181,12 +182,15 @@
 	go kp.sendToKafkaTopic(protoRequest, topic)
 
 	if waitForResponse {
-		// if ctx is nil use a default timeout ctx to ensure we do not wait forever
+		// Create a child context based on the parent context, if any.
+		// Without a parent, the timeout is applied to ctx itself; childCtx
+		// then remains context.Background(), whose Done channel never fires.
 		var cancel context.CancelFunc
+		childCtx := context.Background()
 		if ctx == nil {
 			ctx, cancel = context.WithTimeout(context.Background(), DefaultRequestTimeout*time.Millisecond)
-			defer cancel()
+		} else {
+			childCtx, cancel = context.WithTimeout(ctx, DefaultRequestTimeout*time.Millisecond)
 		}
+		defer cancel()
 
 		// Wait for response as well as timeout or cancellation
 		// Remove the subscription for a response on return
@@ -210,6 +214,15 @@
 				return false, nil // Should never happen
 			}
 			return false, marshalledArg
+		case <-childCtx.Done():
+			log.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
+			//	 pack the error as proto any type
+			protoError := &ca.Error{Reason: childCtx.Err().Error()}
+			var marshalledArg *any.Any
+			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+				return false, nil // Should never happen
+			}
+			return false, marshalledArg
 		case <-kp.doneCh:
 			log.Infow("received-exit-signal", log.Fields{"topic": topic.Name, "rpc": rpc})
 			return true, nil
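
The timeout handling above follows the standard derived-context pattern: wrap the caller's context (when present) with the request timeout and select on its Done channel alongside the response channel, so caller cancellation and expiry of DefaultRequestTimeout surface through the same path. The general shape in isolation, assuming the file's existing context and time imports (requestTimeout is an illustrative name and is not the proxy's exact control flow, which also keeps a case on the parent context):

    // requestTimeout returns a cancellable child context carrying the
    // request deadline; cancelling the parent also cancels the child.
    func requestTimeout(parent context.Context, d time.Duration) (context.Context, context.CancelFunc) {
        if parent == nil {
            parent = context.Background()
        }
        return context.WithTimeout(parent, d)
    }
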
@@ -270,7 +283,7 @@
 	return nil
 }
 
-func (kp *KafkaMessagingProxy) addToTopicToConsumerChannelMap(id string, arg *consumerChannels) {
+func (kp *KafkaMessagingProxy) addTopicToConsumerChannelMap(id string, arg *consumerChannels) {
 	kp.lockTopicToConsumerChannelMap.Lock()
 	defer kp.lockTopicToConsumerChannelMap.Unlock()
 	if _, exist := kp.topicToConsumerChannelMap[id]; !exist {
@@ -533,34 +546,34 @@
 				returnedValues[0] = returnError
 			} else {
 				log.Debugw("returned-api-response", log.Fields{"len": len(out), "err": err})
-				returnSize := 1 // Minimum array size
-				if len(out) > 1 {
-					returnSize = len(out) - 1
-				}
-				returnedValues = make([]interface{}, returnSize)
-				for idx, val := range out {
-					log.Debugw("returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
-					if idx == 0 {
-						if val.Interface() != nil {
-							if goError, ok := out[0].Interface().(error); ok {
-								returnError = &ca.Error{Reason: goError.Error()}
-								returnedValues[0] = returnError
-							} // Else should never occur - maybe never say never?
-							break
-						} else {
-							success = true
+				returnedValues = make([]interface{}, 0)
+				// Check for errors first
+				lastIndex := len(out) - 1
+				if out[lastIndex].Interface() != nil { // Error
+					if goError, ok := out[lastIndex].Interface().(error); ok {
+						returnError = &ca.Error{Reason: goError.Error()}
+						returnedValues = append(returnedValues, returnError)
+					} else { // Should never happen
+						returnError = &ca.Error{Reason: "incorrect-error-returns"}
+						returnedValues = append(returnedValues, returnError)
+					}
+				} else { // Non-error case
+					success = true
+					for idx, val := range out {
+						log.Debugw("returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
+						if idx != lastIndex {
+							returnedValues = append(returnedValues, val.Interface())
 						}
-					} else {
-						returnedValues[idx-1] = val.Interface()
 					}
 				}
 			}
 
 			var icm *ca.InterContainerMessage
 			if icm, err = encodeResponse(msg, success, returnedValues...); err != nil {
-				log.Warnw("error-encoding-response-returning-failure-result", log.Fields{"erroe": err})
+				log.Warnw("error-encoding-response-returning-failure-result", log.Fields{"error": err})
 				icm = encodeDefaultFailedResponse(msg)
 			}
+			log.Debugw("sending-to-kafka", log.Fields{"msg": icm, "send-to-topic": msg.Header.FromTopic})
 			kp.sendToKafkaTopic(icm, &Topic{Name: msg.Header.FromTopic})
 		}
 
@@ -605,7 +618,7 @@
 		case err := <-consumerCh.consumer.Errors():
 			log.Warnw("consumer-error", log.Fields{"error": err})
 		case msg := <-consumerCh.consumer.Messages():
-			log.Debugw("message-received", log.Fields{"msg": msg})
+			//log.Debugw("message-received", log.Fields{"msg": msg})
 			// Since the only expected message is a proto intercontainermessage then extract it right away
 			// instead of dispatching it to the consumers
 			msgBody := msg.Value
@@ -614,9 +627,17 @@
 				log.Warnw("invalid-message", log.Fields{"error": err})
 				continue
 			}
-			log.Debugw("msg-to-consumers", log.Fields{"msg": *icm, "len": len(consumerCh.channels)})
-
-			go kp.dispatchToConsumers(consumerCh, icm)
+			if icm.Header.Type == ca.MessageType_REQUEST {
+				log.Debugw("request-received", log.Fields{"msg": *icm, "len": len(consumerCh.channels)})
+				go kp.dispatchToConsumers(consumerCh, icm)
+			} else if icm.Header.Type == ca.MessageType_RESPONSE {
+				log.Debugw("response-received", log.Fields{"msg": *icm, "len": len(consumerCh.channels)})
+				go kp.dispatchResponse(icm)
+			} else {
+				log.Debugw("unsupported-msg-received", log.Fields{"msg": *icm})
+			}
 		case <-kp.doneCh:
 			log.Infow("received-exit-signal", log.Fields{"topic": topic.Name})
 			break startloop
@@ -636,6 +657,7 @@
 
 func (kp *KafkaMessagingProxy) waitForResponse(ch chan *ca.InterContainerMessage, topic Topic) {
 	log.Debugw("starting-consuming-responses-loop", log.Fields{"topic": topic.Name})
+	kp.waitForResponseRoutineStarted = true
 startloop:
 	for {
 		select {
@@ -681,7 +703,7 @@
 	}
 
 	// Add the consumer channel to the map
-	kp.addToTopicToConsumerChannelMap(topic.Name, cc)
+	kp.addTopicToConsumerChannelMap(topic.Name, cc)
 
 	//Start a consumer to listen on that specific topic
 	go kp.consumeMessagesLoop(topic)
@@ -694,18 +716,16 @@
 // API. There is one response channel waiting for kafka messages before dispatching the message to the
 // corresponding waiting channel
 func (kp *KafkaMessagingProxy) subscribeForResponse(topic Topic, trnsId string) (chan *ca.InterContainerMessage, error) {
-	log.Debugw("subscribeForResponse", log.Fields{"topic": topic.Name})
+	log.Debugw("subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
 
 	if consumerCh := kp.getConsumerChannel(topic); consumerCh == nil {
 		log.Debugw("topic-not-subscribed", log.Fields{"topic": topic.Name})
-		var consumerListeningChannel chan *ca.InterContainerMessage
 		var err error
-		if consumerListeningChannel, err = kp.setupConsumerChannel(topic); err != nil {
+
+		if _, err = kp.setupConsumerChannel(topic); err != nil {
 			log.Warnw("create-consumer-channel-failure", log.Fields{"error": err, "topic": topic.Name})
 			return nil, err
 		}
-		// Start a go routine to listen to response messages over the consumer listening channel
-		go kp.waitForResponse(consumerListeningChannel, topic)
 	}
 
 	ch := make(chan *ca.InterContainerMessage)
diff --git a/kafka/messaging_interface.go b/kafka/messaging_interface.go
index 20784b5..78d9e75 100644
--- a/kafka/messaging_interface.go
+++ b/kafka/messaging_interface.go
@@ -15,12 +15,6 @@
  */
 package kafka
 
-import (
-	"context"
-)
-
-type callback func(bool, interface{})
-
 // A Topic definition - may be augmented with additional attributes eventually
 type Topic struct {
 	// The name of the topic. It must start with a letter,
@@ -34,12 +28,3 @@
 	Key   string
 	Value interface{}
 }
-
-// Client represents the set of APIs a Messaging Client must implement - In progress
-type Client interface {
-	Start()
-	Stop()
-	Subscribe(ctx context.Context, topic *Topic, cb callback, targetInterfaces ...interface{})
-	Publish(ctx context.Context, rpc string, cb callback, topic *Topic, waitForResponse bool, kvArgs ...*KVArg)
-	Unsubscribe(ctx context.Context, topic *Topic)
-}
diff --git a/protos/core_adapter.proto b/protos/core_adapter.proto
index 5021b9a..d828194 100644
--- a/protos/core_adapter.proto
+++ b/protos/core_adapter.proto
@@ -1,6 +1,9 @@
 syntax = "proto3";
 
 import "google/protobuf/any.proto";
+import "openflow_13.proto";
+import public "logical_device.proto";
+
 
 option go_package = "github.com/opencord/voltha-go/protos/core_adapter";
 
@@ -18,8 +21,14 @@
     bool val = 1;
 }
 
+enum ErrorCode {
+    UNSUPPORTED_REQUEST = 0;
+    INVALID_PARAMETERS = 1;
+}
+
 message Error {
-    string reason = 1;
+    ErrorCode code = 1;
+    string reason = 2;
 }
 
 enum MessageType {
@@ -56,3 +65,12 @@
     bool success = 1;
     google.protobuf.Any result = 3;
 }
+
+message SwitchCapability {
+    openflow_13.ofp_desc desc = 1;
+    openflow_13.ofp_switch_features switch_features = 2;
+}
+
+message PortCapability {
+    LogicalPort port = 1;
+}
\ No newline at end of file
diff --git a/protos/device.proto b/protos/device.proto
index f991e44..9c2d98c 100644
--- a/protos/device.proto
+++ b/protos/device.proto
@@ -250,10 +250,10 @@
         uint32 onu_session_id = 4; // session identifier for the ONU; optional
     };
 
-    oneof address {
-        // Device contact MAC address (format: "xx:xx:xx:xx:xx:xx")
-        string mac_address = 13;
+    // Device contact MAC address (format: "xx:xx:xx:xx:xx:xx")
+    string mac_address = 13;
 
+    oneof address {
         // Device contact IPv4 address (format: "a.b.c.d" or can use hostname too)
         string ipv4_address = 14;
 
diff --git a/protos/ponsim.proto b/protos/ponsim.proto
index 2ed8914..e477d98 100644
--- a/protos/ponsim.proto
+++ b/protos/ponsim.proto
@@ -8,9 +8,14 @@
 import "openflow_13.proto";
 
 
+message PonSimOnuDeviceInfo {
+    int32 uni_port = 1;
+    string serial_number = 2;
+}
+
 message PonSimDeviceInfo {
     int32 nni_port = 1;
-    repeated int32 uni_ports = 2;
+    repeated PonSimOnuDeviceInfo onus = 2;
 }
 
 message FlowTable {
@@ -21,6 +26,7 @@
 message PonSimFrame {
     string id = 1;
     bytes payload = 2;
+    int32 out_port = 3;
 }
 
 message PonSimPacketCounter {
diff --git a/protos/voltha.proto b/protos/voltha.proto
index 562ed60..92d59dd 100644
--- a/protos/voltha.proto
+++ b/protos/voltha.proto
@@ -72,6 +72,7 @@
 
 message Logging {
     LogLevel.LogLevel level = 1;
+    string package_name = 2;
 }
 
 // Top-level (root) node for a Voltha Instance
diff --git a/rw_core/config/config.go b/rw_core/config/config.go
index 32b7abb..a999d7b 100644
--- a/rw_core/config/config.go
+++ b/rw_core/config/config.go
@@ -16,11 +16,9 @@
 package config
 
 import (
-	//"context"
 	"flag"
 	"fmt"
-	//dt "github.com/docker/docker/api/types"
-	//dc "github.com/docker/docker/client"
+	"github.com/opencord/voltha-go/common/log"
 	"os"
 )
 
@@ -31,13 +29,13 @@
 	default_InstanceID       = "rwcore001"
 	default_GrpcPort         = 50057
 	default_GrpcHost         = "127.0.0.1"
-	default_KafkaAdapterHost = "10.100.198.240"
+	default_KafkaAdapterHost = "10.176.230.190"
 	default_KafkaAdapterPort = 9092
-	default_KafkaClusterHost = "10.100.198.240"
+	default_KafkaClusterHost = "10.176.215.107"
 	default_KafkaClusterPort = 9094
 	default_KVStoreType      = ConsulStoreName
 	default_KVStoreTimeout   = 5 //in seconds
-	default_KVStoreHost      = "10.100.198.240"
+	default_KVStoreHost      = "10.176.230.190"
 	default_KVStorePort      = 8500 // Etcd = 2379
 	default_LogLevel         = 0
 	default_Banner           = false
@@ -71,6 +69,10 @@
 	RWCoreCA         string
 }
 
+func init() {
+	log.AddPackage(log.JSON, log.WarnLevel, nil)
+}
+
 // NewRWCoreFlags returns a new RWCore config
 func NewRWCoreFlags() *RWCoreFlags {
 	var rwCoreFlag = RWCoreFlags{ // Default values
@@ -145,17 +147,11 @@
 
 	flag.Parse()
 
-	// Update the necessary keys with the prefixes
-	//start := time.Now()
 	containerName := getContainerInfo()
-	//fmt.Println("container name:", containerName)
 	if len(containerName) > 0 {
 		cf.InstanceID = containerName
 	}
 
-	//fmt.Println("Inside config:", cf)
-	//elapsed := time.Since(start)
-	//fmt.Println("time:", elapsed/time.Second)
 }
 
 func getContainerInfo() string {
diff --git a/rw_core/core/adapter_proxy.go b/rw_core/core/adapter_proxy.go
new file mode 100644
index 0000000..a0e25f3
--- /dev/null
+++ b/rw_core/core/adapter_proxy.go
@@ -0,0 +1,236 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package core
+
+import (
+	"context"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/kafka"
+	ca "github.com/opencord/voltha-go/protos/core_adapter"
+	"github.com/opencord/voltha-go/protos/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type AdapterProxy struct {
+	TestMode   bool
+	kafkaProxy *kafka.KafkaMessagingProxy
+}
+
+func NewAdapterProxy(kafkaProxy *kafka.KafkaMessagingProxy) *AdapterProxy {
+	var proxy AdapterProxy
+	proxy.kafkaProxy = kafkaProxy
+	return &proxy
+}
+
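+// AdoptDevice posts an "adopt_device" request on the topic named after the device type and waits for the adapter's response.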
+func (ap *AdapterProxy) AdoptDevice(ctx context.Context, device *voltha.Device) error {
+	log.Debugw("AdoptDevice", log.Fields{"device": device})
+	topic := kafka.Topic{Name: device.Type}
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "device",
+		Value: device,
+	}
+	success, result := ap.kafkaProxy.InvokeRPC(ctx, "adopt_device", &topic, true, args...)
+	log.Debugw("AdoptDevice-response", log.Fields{"deviceid": device.Id, "success": success, "result": result})
+	if success {
+		return nil
+	} else {
+		unpackResult := &ca.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		log.Debugw("AdoptDevice-return", log.Fields{"deviceid": device.Id, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return status.Errorf(codes.Canceled, "%s", unpackResult.Reason)
+	}
+}
+
+func (ap *AdapterProxy) AdapterDescriptor() (*voltha.Adapter, error) {
+	log.Debug("AdapterDescriptor")
+	return nil, nil
+}
+
+func (ap *AdapterProxy) DeviceTypes() (*voltha.DeviceType, error) {
+	log.Debug("DeviceTypes")
+	return nil, nil
+}
+
+func (ap *AdapterProxy) Health() (*voltha.HealthStatus, error) {
+	log.Debug("Health")
+	return nil, nil
+}
+
+func (ap *AdapterProxy) ReconcileDevice(device voltha.Device) error {
+	log.Debug("ReconcileDevice")
+	return nil
+}
+
+func (ap *AdapterProxy) AbandonDevice(device voltha.Device) error {
+	log.Debug("AbandonDevice")
+	return nil
+}
+
+func (ap *AdapterProxy) DisableDevice(device voltha.Device) error {
+	log.Debug("DisableDevice")
+	return nil
+}
+
+func (ap *AdapterProxy) ReEnableDevice(device voltha.Device) error {
+	log.Debug("ReEnableDevice")
+	return nil
+}
+
+func (ap *AdapterProxy) RebootDevice(device voltha.Device) error {
+	log.Debug("RebootDevice")
+	return nil
+}
+
+func (ap *AdapterProxy) DeleteDevice(device voltha.Device) error {
+	log.Debug("DeleteDevice")
+	return nil
+}
+
+func (ap *AdapterProxy) GetDeviceDetails(device voltha.Device) (*voltha.Device, error) {
+	log.Debug("GetDeviceDetails")
+	return nil, nil
+}
+
+func (ap *AdapterProxy) DownloadImage(device voltha.Device, download voltha.ImageDownload) error {
+	log.Debug("DownloadImage")
+	return nil
+}
+
+func (ap *AdapterProxy) GetImageDownloadStatus(device voltha.Device, download voltha.ImageDownload) error {
+	log.Debug("GetImageDownloadStatus")
+	return nil
+}
+
+func (ap *AdapterProxy) CancelImageDownload(device voltha.Device, download voltha.ImageDownload) error {
+	log.Debug("CancelImageDownload")
+	return nil
+}
+
+func (ap *AdapterProxy) ActivateImageUpdate(device voltha.Device, download voltha.ImageDownload) error {
+	log.Debug("ActivateImageUpdate")
+	return nil
+}
+
+func (ap *AdapterProxy) RevertImageUpdate(device voltha.Device, download voltha.ImageDownload) error {
+	log.Debug("RevertImageUpdate")
+	return nil
+}
+
+func (ap *AdapterProxy) SelfTestDevice(device voltha.Device) (*voltha.SelfTestResponse, error) {
+	log.Debug("SelfTestDevice")
+	return nil, nil
+}
+
+func (ap *AdapterProxy) UpdateFlowsBulk(device voltha.Device, flows voltha.Flows, groups voltha.FlowGroups) error {
+	log.Debug("UpdateFlowsBulk")
+	return nil
+}
+
+func (ap *AdapterProxy) UpdateFlowsIncremental(device voltha.Device, flowChanges voltha.Flows, groupChanges voltha.FlowGroups) error {
+	log.Debug("UpdateFlowsIncremental")
+	return nil
+}
+
+func (ap *AdapterProxy) UpdatePmConfig(device voltha.Device, pmConfigs voltha.PmConfigs) error {
+	log.Debug("UpdatePmConfig")
+	return nil
+}
+
+func (ap *AdapterProxy) ReceivePacketOut(deviceId voltha.ID, egressPortNo int, msg interface{}) error {
+	log.Debug("ReceivePacketOut")
+	return nil
+}
+
+func (ap *AdapterProxy) SuppressAlarm(filter voltha.AlarmFilter) error {
+	log.Debug("SuppressAlarm")
+	return nil
+}
+
+func (ap *AdapterProxy) UnSuppressAlarm(filter voltha.AlarmFilter) error {
+	log.Debug("UnSuppressAlarm")
+	return nil
+}
+
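+// GetOfpDeviceInfo asks the adapter for the OpenFlow switch capability (description and switch features) of a device.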
+func (ap *AdapterProxy) GetOfpDeviceInfo(ctx context.Context, device *voltha.Device) (*ca.SwitchCapability, error) {
+	log.Debugw("GetOfpDeviceInfo", log.Fields{"device": device})
+	topic := kafka.Topic{Name: device.Type}
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "device",
+		Value: device,
+	}
+	success, result := ap.kafkaProxy.InvokeRPC(ctx, "get_ofp_device_info", &topic, true, args...)
+	log.Debugw("GetOfpDeviceInfo-response", log.Fields{"device": device, "success": success, "result": result})
+	if success {
+		unpackResult := &ca.SwitchCapability{}
+		if err := ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
+		}
+		return unpackResult, nil
+	} else {
+		unpackResult := &ca.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		log.Debugw("GetOfpDeviceInfo-return", log.Fields{"deviceid": device.Id, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
+	}
+}
+
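+// GetOfpPortInfo asks the adapter for the OpenFlow capability of a single port on a device.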
+func (ap *AdapterProxy) GetOfpPortInfo(ctx context.Context, device *voltha.Device, portNo uint32) (*ca.PortCapability, error) {
+	log.Debug("GetOfpPortInfo", log.Fields{"device": device})
+	topic := kafka.Topic{Name: device.Type}
+	args := make([]*kafka.KVArg, 2)
+	args[0] = &kafka.KVArg{
+		Key:   "device",
+		Value: device,
+	}
+	pNo := &ca.IntType{Val: int64(portNo)}
+	args[1] = &kafka.KVArg{
+		Key:   "port_no",
+		Value: pNo,
+	}
+
+	success, result := ap.kafkaProxy.InvokeRPC(ctx, "get_ofp_port_info", &topic, true, args...)
+	log.Debugw("GetOfpPortInfo-response", log.Fields{"deviceid": device.Id, "device": device, "success": success, "result": result})
+	if success {
+		unpackResult := &ca.PortCapability{}
+		if err := ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
+		}
+		return unpackResult, nil
+	} else {
+		unpackResult := &ca.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		log.Debugw("GetOfpPortInfo-return", log.Fields{"deviceid": device.Id, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
+	}
+}
diff --git a/rw_core/core/adapter_request_handler.go b/rw_core/core/adapter_request_handler.go
new file mode 100644
index 0000000..95ad69d
--- /dev/null
+++ b/rw_core/core/adapter_request_handler.go
@@ -0,0 +1,355 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package core
+
+import (
+	"errors"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/empty"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/model"
+	ca "github.com/opencord/voltha-go/protos/core_adapter"
+	"github.com/opencord/voltha-go/protos/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type AdapterRequestHandlerProxy struct {
+	TestMode         bool
+	deviceMgr        *DeviceManager
+	lDeviceMgr       *LogicalDeviceManager
+	localDataProxy   *model.Proxy
+	clusterDataProxy *model.Proxy
+}
+
+func NewAdapterRequestHandlerProxy(dMgr *DeviceManager, ldMgr *LogicalDeviceManager, cdProxy *model.Proxy, ldProxy *model.Proxy) *AdapterRequestHandlerProxy {
+	var proxy AdapterRequestHandlerProxy
+	proxy.deviceMgr = dMgr
+	proxy.lDeviceMgr = ldMgr
+	proxy.clusterDataProxy = cdProxy
+	proxy.localDataProxy = ldProxy
+	return &proxy
+}
+
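+// Register handles a registration request from an adapter.  Persisting the adapter data in the KV store is still a TODO.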
+func (rhp *AdapterRequestHandlerProxy) Register(args []*ca.Argument) (*voltha.CoreInstance, error) {
+	if len(args) != 1 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	adapter := &voltha.Adapter{}
+	if err := ptypes.UnmarshalAny(args[0].Value, adapter); err != nil {
+		log.Warnw("cannot-unmarshal-adapter", log.Fields{"error": err})
+		return nil, err
+	}
+	log.Debugw("Register", log.Fields{"Adapter": *adapter})
+	// TODO process the request and store the data in the KV store
+
+	if rhp.TestMode { // Execute only for test cases
+		return &voltha.CoreInstance{InstanceId: "CoreInstance"}, nil
+	}
+	return &voltha.CoreInstance{InstanceId: "CoreInstance"}, nil
+}
+
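+// GetDevice retrieves a device by id through the device manager.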
+func (rhp *AdapterRequestHandlerProxy) GetDevice(args []*ca.Argument) (*voltha.Device, error) {
+	if len(args) != 1 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	pID := &voltha.ID{}
+	if err := ptypes.UnmarshalAny(args[0].Value, pID); err != nil {
+		log.Warnw("cannot-unmarshal-ID", log.Fields{"error": err})
+		return nil, err
+	}
+	log.Debugw("GetDevice", log.Fields{"deviceId": pID.Id})
+
+	if rhp.TestMode { // Execute only for test cases
+		return &voltha.Device{Id: pID.Id}, nil
+	}
+
+	// Get the device via the device manager
+	if device, err := rhp.deviceMgr.getDevice(pID.Id); err != nil {
+		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
+	} else {
+		return device, nil
+	}
+}
+
+func (rhp *AdapterRequestHandlerProxy) DeviceUpdate(args []*ca.Argument) (*empty.Empty, error) {
+	if len(args) != 1 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	device := &voltha.Device{}
+	if err := ptypes.UnmarshalAny(args[0].Value, device); err != nil {
+		log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+		return nil, err
+	}
+	log.Debugw("DeviceUpdate", log.Fields{"device": device})
+
+	if rhp.TestMode { // Execute only for test cases
+		return new(empty.Empty), nil
+	}
+	if err := rhp.deviceMgr.updateDevice(device); err != nil {
+		log.Debugw("DeviceUpdate-error", log.Fields{"device": device, "error": err})
+		return nil, status.Errorf(codes.Internal, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
+func (rhp *AdapterRequestHandlerProxy) GetChildDevice(args []*ca.Argument) (*voltha.Device, error) {
+	if len(args) < 1 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	pID := &ca.StrType{}
+	if err := ptypes.UnmarshalAny(args[0].Value, pID); err != nil {
+		log.Warnw("cannot-unmarshal-ID", log.Fields{"error": err})
+		return nil, err
+	}
+	log.Debugw("GetChildDevice", log.Fields{"deviceId": pID.Val})
+
+	if rhp.TestMode { // Execute only for test cases
+		return &voltha.Device{Id: pID.Val}, nil
+	}
+	return nil, nil
+}
+
+func (rhp *AdapterRequestHandlerProxy) GetPorts(args []*ca.Argument) (*voltha.Ports, error) {
+	if len(args) != 2 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	pID := &ca.StrType{}
+	if err := ptypes.UnmarshalAny(args[0].Value, pID); err != nil {
+		log.Warnw("cannot-unmarshal-ID", log.Fields{"error": err})
+		return nil, err
+	}
+	// Port type is an enum sent as an integer proto
+	pt := &ca.IntType{}
+	if err := ptypes.UnmarshalAny(args[1].Value, pt); err != nil {
+		log.Warnw("cannot-unmarshal-porttype", log.Fields{"error": err})
+		return nil, err
+	}
+
+	log.Debugw("GetPorts", log.Fields{"deviceID": pID.Val, "portype": pt.Val})
+
+	if rhp.TestMode { // Execute only for test cases
+		aPort := &voltha.Port{Label: "test_port"}
+		allPorts := &voltha.Ports{}
+		allPorts.Items = append(allPorts.Items, aPort)
+		return allPorts, nil
+	}
+	return nil, nil
+
+}
+
+func (rhp *AdapterRequestHandlerProxy) GetChildDevices(args []*ca.Argument) (*voltha.Device, error) {
+	if len(args) != 1 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	pID := &ca.StrType{}
+	if err := ptypes.UnmarshalAny(args[0].Value, pID); err != nil {
+		log.Warnw("cannot-unmarshal-ID", log.Fields{"error": err})
+		return nil, err
+	}
+	log.Debugw("GetChildDevice", log.Fields{"deviceId": pID.Val})
+
+	if rhp.TestMode { // Execute only for test cases
+		return &voltha.Device{Id: pID.Val}, nil
+	}
+	//TODO: Complete
+	return nil, nil
+}
+
+// ChildDeviceDetected is invoked when a child device is detected.  The following
+// parameters are expected:
+// parent_device_id, parent_port_no, child_device_type and channel_id
+func (rhp *AdapterRequestHandlerProxy) ChildDeviceDetected(args []*ca.Argument) (*empty.Empty, error) {
+	if len(args) < 4 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+
+	pID := &voltha.ID{}
+	portNo := &ca.IntType{}
+	dt := &ca.StrType{}
+	chnlId := &ca.IntType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "parent_device_id":
+			if err := ptypes.UnmarshalAny(arg.Value, pID); err != nil {
+				log.Warnw("cannot-unmarshal-parent-device-id", log.Fields{"error": err})
+				return nil, err
+			}
+		case "parent_port_no":
+			if err := ptypes.UnmarshalAny(arg.Value, portNo); err != nil {
+				log.Warnw("cannot-unmarshal-parent-port", log.Fields{"error": err})
+				return nil, err
+			}
+		case "child_device_type":
+			if err := ptypes.UnmarshalAny(arg.Value, dt); err != nil {
+				log.Warnw("cannot-unmarshal-child-device-type", log.Fields{"error": err})
+				return nil, err
+			}
+		case "channel_id":
+			if err := ptypes.UnmarshalAny(arg.Value, chnlId); err != nil {
+				log.Warnw("cannot-unmarshal-channel-id", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	log.Debugw("ChildDeviceDetected", log.Fields{"parentDeviceId": pID.Id, "parentPortNo": portNo.Val,
+		"deviceType": dt.Val, "channelId": chnlId.Val})
+
+	if rhp.TestMode { // Execute only for test cases
+		return nil, nil
+	}
+
+	// Run child detection in its own goroutine as it can be a lengthy process
+	go rhp.deviceMgr.childDeviceDetected(pID.Id, portNo.Val, dt.Val, chnlId.Val)
+
+	return new(empty.Empty), nil
+}
+
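+// DeviceStateUpdate updates the operational and/or connection status of a device; a value of -1 leaves that status unchanged.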
+func (rhp *AdapterRequestHandlerProxy) DeviceStateUpdate(args []*ca.Argument) (*empty.Empty, error) {
+	if len(args) < 2 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	deviceId := &voltha.ID{}
+	operStatus := &ca.IntType{}
+	connStatus := &ca.IntType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device_id":
+			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
+				log.Warnw("cannot-unmarshal-device-id", log.Fields{"error": err})
+				return nil, err
+			}
+		case "oper_status":
+			if err := ptypes.UnmarshalAny(arg.Value, operStatus); err != nil {
+				log.Warnw("cannot-unmarshal-operStatus", log.Fields{"error": err})
+				return nil, err
+			}
+			if operStatus.Val == -1 {
+				operStatus = nil
+			}
+		case "connect_status":
+			if err := ptypes.UnmarshalAny(arg.Value, connStatus); err != nil {
+				log.Warnw("cannot-unmarshal-connStatus", log.Fields{"error": err})
+				return nil, err
+			}
+			if connStatus.Val == -1 {
+				connStatus = nil
+			}
+		}
+	}
+
+	log.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId.Id, "oper-status": operStatus, "conn-status": connStatus})
+
+	if rhp.TestMode { // Execute only for test cases
+		return nil, nil
+	}
+	if err := rhp.deviceMgr.updateDeviceState(deviceId.Id, operStatus, connStatus); err != nil {
+		log.Debugw("DeviceUpdate-error", log.Fields{"deviceId": deviceId.Id, "error": err})
+		return nil, status.Errorf(codes.Internal, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
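+// PortCreated adds a port reported by the adapter to the stored device.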
+func (rhp *AdapterRequestHandlerProxy) PortCreated(args []*ca.Argument) (*empty.Empty, error) {
+	if len(args) != 2 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	deviceId := &voltha.ID{}
+	port := &voltha.Port{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device_id":
+			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
+				log.Warnw("cannot-unmarshal-device-id", log.Fields{"error": err})
+				return nil, err
+			}
+		case "port":
+			if err := ptypes.UnmarshalAny(arg.Value, port); err != nil {
+				log.Warnw("cannot-unmarshal-port", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	log.Debugw("PortCreated", log.Fields{"deviceId": deviceId.Id, "port": port})
+
+	if rhp.TestMode { // Execute only for test cases
+		return nil, nil
+	}
+
+	if err := rhp.deviceMgr.addPort(deviceId.Id, port); err != nil {
+		log.Debugw("addport-error", log.Fields{"deviceId": deviceId.Id, "error": err})
+		return nil, status.Errorf(codes.Internal, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+}
+
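+// DevicePMConfigUpdate applies the PM configs carried in the request to the corresponding device.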
+func (rhp *AdapterRequestHandlerProxy) DevicePMConfigUpdate(args []*ca.Argument) (*empty.Empty, error) {
+	if len(args) != 2 {
+		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	pmConfigs := &voltha.PmConfigs{}
+	init := &ca.BoolType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "device_pm_config":
+			if err := ptypes.UnmarshalAny(arg.Value, pmConfigs); err != nil {
+				log.Warnw("cannot-unmarshal-pm-config", log.Fields{"error": err})
+				return nil, err
+			}
+		case "init":
+			if err := ptypes.UnmarshalAny(arg.Value, init); err != nil {
+				log.Warnw("cannot-unmarshal-boolean", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	log.Debugw("DevicePMConfigUpdate", log.Fields{"deviceId": pmConfigs.Id, "configs": pmConfigs,
+		"init": init})
+
+	if rhp.TestMode { // Execute only for test cases
+		return nil, nil
+	}
+
+	if err := rhp.deviceMgr.updatePmConfigs(pmConfigs.Id, pmConfigs); err != nil {
+		log.Debugw("update-pmconfigs-error", log.Fields{"deviceId": pmConfigs.Id, "error": err})
+		return nil, status.Errorf(codes.Internal, "%s", err.Error())
+	}
+	return new(empty.Empty), nil
+
+}
diff --git a/rw_core/core/core.go b/rw_core/core/core.go
new file mode 100644
index 0000000..f9e42ef
--- /dev/null
+++ b/rw_core/core/core.go
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package core
+
+import (
+	"context"
+	grpcserver "github.com/opencord/voltha-go/common/grpc"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/model"
+	"github.com/opencord/voltha-go/kafka"
+	"github.com/opencord/voltha-go/protos/voltha"
+	"github.com/opencord/voltha-go/rw_core/config"
+	"google.golang.org/grpc"
+	"reflect"
+)
+
+type Core struct {
+	instanceId        string
+	deviceMgr         *DeviceManager
+	logicalDeviceMgr  *LogicalDeviceManager
+	grpcServer        *grpcserver.GrpcServer
+	grpcNBIAPIHandler *APIHandler
+	config            *config.RWCoreFlags
+	kmp               *kafka.KafkaMessagingProxy
+	clusterDataRoot   *model.Root
+	localDataRoot     *model.Root
+	clusterDataProxy  *model.Proxy
+	localDataProxy    *model.Proxy
+	exitChannel       chan int
+}
+
+func init() {
+	log.AddPackage(log.JSON, log.WarnLevel, nil)
+}
+
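+// NewCore creates a Core instance backed by in-memory (non-persisted) cluster and local data models.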
+func NewCore(id string, cf *config.RWCoreFlags) *Core {
+	var core Core
+	core.instanceId = id
+	core.exitChannel = make(chan int, 1)
+	core.config = cf
+	// TODO: Setup the KV store
+	core.clusterDataRoot = model.NewRoot(&voltha.Voltha{}, nil, reflect.TypeOf(model.NonPersistedRevision{}))
+	core.localDataRoot = model.NewRoot(&voltha.CoreInstance{}, nil, reflect.TypeOf(model.NonPersistedRevision{}))
+	core.clusterDataProxy = core.clusterDataRoot.Node.GetProxy("/", false)
+	core.localDataProxy = core.localDataRoot.Node.GetProxy("/", false)
+	return &core
+}
+
+func (core *Core) Start(ctx context.Context) {
+	log.Info("starting-core")
+	core.startKafkaMessagingProxy(ctx)
+	log.Info("values", log.Fields{"kmp": core.kmp})
+	core.deviceMgr = NewDeviceManager(core.kmp, core.localDataProxy)
+	core.logicalDeviceMgr = NewLogicalDeviceManager(core.deviceMgr, core.kmp, core.localDataProxy)
+	core.registerAdapterRequestHandler(ctx, core.deviceMgr, core.logicalDeviceMgr, core.localDataProxy, core.clusterDataProxy)
+	go core.startDeviceManager(ctx)
+	go core.startLogicalDeviceManager(ctx)
+	go core.startGRPCService(ctx)
+
+	log.Info("core-started")
+}
+
+func (core *Core) Stop(ctx context.Context) {
+	log.Info("stopping-core")
+	core.exitChannel <- 1
+	log.Info("core-stopped")
+}
+
+// startGRPCService creates the gRPC service handler, registers it with the gRPC server
+// and starts the server
+func (core *Core) startGRPCService(ctx context.Context) {
+	// Create an insecure gRPC server
+	core.grpcServer = grpcserver.NewGrpcServer(core.config.GrpcHost, core.config.GrpcPort, nil, false)
+	log.Info("grpc-server-created")
+
+	core.grpcNBIAPIHandler = NewAPIHandler(core.deviceMgr, core.logicalDeviceMgr, core.clusterDataProxy, core.localDataProxy)
+	//	Create a function to register the core GRPC service with the GRPC server
+	f := func(gs *grpc.Server) {
+		voltha.RegisterVolthaServiceServer(
+			gs,
+			core.grpcNBIAPIHandler,
+		)
+	}
+
+	core.grpcServer.AddService(f)
+	log.Info("grpc-service-added")
+
+	//	Start the server
+	core.grpcServer.Start(context.Background())
+	log.Info("grpc-server-started")
+}
+
+func (core *Core) startKafkaMessagingProxy(ctx context.Context) error {
+	log.Infow("starting-kafka-messaging-proxy", log.Fields{"host": core.config.KafkaAdapterHost,
+		"port": core.config.KafkaAdapterPort, "topic": core.config.CoreTopic})
+	var err error
+	if core.kmp, err = kafka.NewKafkaMessagingProxy(
+		kafka.KafkaHost(core.config.KafkaAdapterHost),
+		kafka.KafkaPort(core.config.KafkaAdapterPort),
+		kafka.DefaultTopic(&kafka.Topic{Name: core.config.CoreTopic})); err != nil {
+		log.Errorw("fail-to-create-kafka-proxy", log.Fields{"error": err})
+		return err
+	}
+
+	if err = core.kmp.Start(); err != nil {
+		log.Fatalw("error-starting-messaging-proxy", log.Fields{"error": err})
+		return err
+	}
+
+	log.Info("kafka-messaging-proxy-created")
+	return nil
+}
+
+func (core *Core) registerAdapterRequestHandler(ctx context.Context, dMgr *DeviceManager, ldMgr *LogicalDeviceManager,
+	cdProxy *model.Proxy, ldProxy *model.Proxy) error {
+	requestProxy := NewAdapterRequestHandlerProxy(dMgr, ldMgr, cdProxy, ldProxy)
+	core.kmp.SubscribeWithTarget(kafka.Topic{Name: core.config.CoreTopic}, requestProxy)
+
+	log.Info("request-handler")
+	return nil
+}
+
+func (core *Core) startDeviceManager(ctx context.Context) {
+	// TODO: Interaction between the logicaldevicemanager and devicemanager should mostly occur via
+	// callbacks.  For now, until the model is ready, devicemanager will keep a reference to the
+	// logicaldevicemanager to initiate the creation of logical devices
+	log.Info("starting-DeviceManager")
+	core.deviceMgr.Start(ctx, core.logicalDeviceMgr)
+	log.Info("started-DeviceManager")
+}
+
+func (core *Core) startLogicalDeviceManager(ctx context.Context) {
+	log.Info("starting-Logical-DeviceManager")
+	core.logicalDeviceMgr.Start(ctx)
+	log.Info("started-Logical-DeviceManager")
+}
diff --git a/rw_core/core/device_agent.go b/rw_core/core/device_agent.go
new file mode 100644
index 0000000..aa13748
--- /dev/null
+++ b/rw_core/core/device_agent.go
@@ -0,0 +1,257 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package core
+
+import (
+	"context"
+	"github.com/gogo/protobuf/proto"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/model"
+	"github.com/opencord/voltha-go/protos/core_adapter"
+	"github.com/opencord/voltha-go/protos/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"reflect"
+)
+
+type DeviceAgent struct {
+	deviceId       string
+	lastData       *voltha.Device
+	adapterProxy   *AdapterProxy
+	deviceMgr      *DeviceManager
+	localDataProxy *model.Proxy
+	exitChannel    chan int
+}
+
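+// newDeviceAgent creates an agent for the given device, assigning the device a fresh id in the process.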
+func newDeviceAgent(ap *AdapterProxy, device *voltha.Device, deviceMgr *DeviceManager, ldProxy *model.Proxy) *DeviceAgent {
+	var agent DeviceAgent
+	device.Id = CreateDeviceId()
+	agent.deviceId = device.Id
+	agent.adapterProxy = ap
+	agent.lastData = device
+	agent.deviceMgr = deviceMgr
+	agent.exitChannel = make(chan int, 1)
+	agent.localDataProxy = ldProxy
+	return &agent
+}
+
+func (agent *DeviceAgent) start(ctx context.Context) {
+	log.Debugw("starting-device-agent", log.Fields{"device": agent.lastData})
+	// Add the initial device to the local model
+	if added := agent.localDataProxy.Add("/devices", agent.lastData, ""); added == nil {
+		log.Errorw("failed-to-add-device", log.Fields{"deviceId": agent.deviceId})
+	}
+	log.Debug("device-agent-started")
+}
+
+func (agent *DeviceAgent) Stop(ctx context.Context) {
+	log.Debug("stopping-device-agent")
+	agent.exitChannel <- 1
+	log.Debug("device-agent-stopped")
+}
+
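+// enableDevice marks the device ENABLED/ACTIVATING in the local model and then asks the adapter to adopt it.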
+func (agent *DeviceAgent) enableDevice(ctx context.Context) error {
+	log.Debugw("enableDevice", log.Fields{"id": agent.lastData.Id, "device": agent.lastData})
+	// Update the device status
+	if device, err := agent.deviceMgr.getDevice(agent.deviceId); err != nil {
+		return status.Errorf(codes.NotFound, "%s", agent.deviceId)
+	} else {
+		cloned := reflect.ValueOf(device).Elem().Interface().(voltha.Device)
+		cloned.AdminState = voltha.AdminState_ENABLED
+		cloned.OperStatus = voltha.OperStatus_ACTIVATING
+		if afterUpdate := agent.localDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, ""); afterUpdate == nil {
+			return status.Errorf(codes.Internal, "failed-update-device:%s", agent.deviceId)
+		} else {
+			if err := agent.adapterProxy.AdoptDevice(ctx, &cloned); err != nil {
+				log.Debugw("enableDevice-error", log.Fields{"id": agent.lastData.Id, "error": err})
+				return err
+			}
+			agent.lastData = &cloned
+		}
+	}
+	return nil
+}
+
+func (agent *DeviceAgent) getNNIPorts(ctx context.Context) *voltha.Ports {
+	log.Debugw("getNNIPorts", log.Fields{"id": agent.deviceId})
+	ports := &voltha.Ports{}
+	if device, _ := agent.deviceMgr.getDevice(agent.deviceId); device != nil {
+		for _, port := range device.Ports {
+			if port.Type == voltha.Port_ETHERNET_NNI {
+				ports.Items = append(ports.Items, port)
+			}
+		}
+	}
+	return ports
+}
+
+func (agent *DeviceAgent) getSwitchCapability(ctx context.Context) (*core_adapter.SwitchCapability, error) {
+	log.Debugw("getSwitchCapability", log.Fields{"deviceId": agent.deviceId})
+	if device, err := agent.deviceMgr.getDevice(agent.deviceId); device == nil {
+		return nil, err
+	} else {
+		var switchCap *core_adapter.SwitchCapability
+		var err error
+		if switchCap, err = agent.adapterProxy.GetOfpDeviceInfo(ctx, device); err != nil {
+			log.Debugw("getSwitchCapability-error", log.Fields{"id": device.Id, "error": err})
+			return nil, err
+		}
+		return switchCap, nil
+	}
+}
+
+func (agent *DeviceAgent) getPortCapability(ctx context.Context, portNo uint32) (*core_adapter.PortCapability, error) {
+	log.Debugw("getPortCapability", log.Fields{"deviceId": agent.deviceId})
+	if device, err := agent.deviceMgr.getDevice(agent.deviceId); device == nil {
+		return nil, err
+	} else {
+		var portCap *core_adapter.PortCapability
+		var err error
+		if portCap, err = agent.adapterProxy.GetOfpPortInfo(ctx, device, portNo); err != nil {
+			log.Debugw("getPortCapability-error", log.Fields{"id": device.Id, "error": err})
+			return nil, err
+		}
+		return portCap, nil
+	}
+}
+
+func (agent *DeviceAgent) updateDevice(device *voltha.Device) error {
+	log.Debugw("updateDevice", log.Fields{"deviceId": device.Id})
+	// Get the dev info from the model
+	if storedData, err := agent.deviceMgr.getDevice(device.Id); err != nil {
+		return status.Errorf(codes.NotFound, "%s", device.Id)
+	} else {
+		// store the changed data
+		cloned := (proto.Clone(device)).(*voltha.Device)
+		afterUpdate := agent.localDataProxy.Update("/devices/"+device.Id, cloned, false, "")
+		if afterUpdate == nil {
+			return status.Errorf(codes.Internal, "%s", device.Id)
+		}
+		// Perform the state transition
+		if err := agent.deviceMgr.processTransition(storedData, cloned); err != nil {
+			log.Warnw("process-transition-error", log.Fields{"deviceid": device.Id, "error": err})
+			return err
+		}
+		return nil
+	}
+}
+
+func (agent *DeviceAgent) updateDeviceState(operState *core_adapter.IntType, connState *core_adapter.IntType) error {
+	// Work only on latest data
+	if storeDevice, err := agent.deviceMgr.getDevice(agent.deviceId); err != nil {
+		return status.Errorf(codes.NotFound, "%s", agent.deviceId)
+	} else {
+		// clone the device
+		cloned := reflect.ValueOf(storeDevice).Elem().Interface().(voltha.Device)
+		if operState != nil {
+			cloned.OperStatus = voltha.OperStatus_OperStatus(operState.Val)
+		}
+		if connState != nil {
+			cloned.ConnectStatus = voltha.ConnectStatus_ConnectStatus(connState.Val)
+		}
+		log.Debugw("DeviceStateUpdate-device", log.Fields{"device": cloned})
+		// Store the device
+		if afterUpdate := agent.localDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, ""); afterUpdate == nil {
+			return status.Errorf(codes.Internal, "%s", agent.deviceId)
+		}
+		// Perform the state transition
+		if err := agent.deviceMgr.processTransition(storeDevice, &cloned); err != nil {
+			log.Warnw("process-transition-error", log.Fields{"deviceid": agent.deviceId, "error": err})
+			return err
+		}
+		return nil
+	}
+}
+
+func (agent *DeviceAgent) updatePmConfigs(pmConfigs *voltha.PmConfigs) error {
+	log.Debug("updatePmConfigs")
+	// Work only on latest data
+	if storeDevice, err := agent.deviceMgr.getDevice(agent.deviceId); err != nil {
+		return status.Errorf(codes.NotFound, "%s", agent.deviceId)
+	} else {
+		// clone the device
+		cloned := reflect.ValueOf(storeDevice).Elem().Interface().(voltha.Device)
+		cp := proto.Clone(pmConfigs)
+		cloned.PmConfigs = cp.(*voltha.PmConfigs)
+		// Store the device
+		afterUpdate := agent.localDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, "")
+		if afterUpdate == nil {
+			return status.Errorf(codes.Internal, "%s", agent.deviceId)
+		}
+		return nil
+	}
+}
+
+func (agent *DeviceAgent) addPort(port *voltha.Port) error {
+	log.Debug("addPort")
+	// Work only on latest data
+	if storeDevice, err := agent.deviceMgr.getDevice(agent.deviceId); err != nil {
+		return status.Errorf(codes.NotFound, "%s", agent.deviceId)
+	} else {
+		// clone the device
+		cloned := reflect.ValueOf(storeDevice).Elem().Interface().(voltha.Device)
+		if cloned.Ports == nil {
+			//	First port
+			cloned.Ports = make([]*voltha.Port, 0)
+		}
+		cp := proto.Clone(port)
+		cloned.Ports = append(cloned.Ports, cp.(*voltha.Port))
+		// Store the device
+		afterUpdate := agent.localDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, "")
+		if afterUpdate == nil {
+			return status.Errorf(codes.Internal, "%s", agent.deviceId)
+		}
+		return nil
+	}
+}
+
+// TODO: A generic device update by attribute
+func (agent *DeviceAgent) updateDeviceAttribute(name string, value interface{}) {
+	if value == nil {
+		return
+	}
+	var storeDevice *voltha.Device
+	var err error
+	if storeDevice, err = agent.deviceMgr.getDevice(agent.deviceId); err != nil {
+		return
+	}
+	updated := false
+	s := reflect.ValueOf(storeDevice).Elem()
+	if s.Kind() == reflect.Struct {
+		// exported field
+		f := s.FieldByName(name)
+		if f.IsValid() && f.CanSet() {
+			switch f.Kind() {
+			case reflect.String:
+				f.SetString(value.(string))
+				updated = true
+			case reflect.Uint32:
+				f.SetUint(uint64(value.(uint32)))
+				updated = true
+			case reflect.Bool:
+				f.SetBool(value.(bool))
+				updated = true
+			}
+		}
+	}
+	log.Debugw("update-field-status", log.Fields{"device": storeDevice, "name": name, "updated": updated})
+	//	Save the data
+	cloned := reflect.ValueOf(storeDevice).Elem().Interface().(voltha.Device)
+	if afterUpdate := agent.localDataProxy.Update("/devices/"+agent.deviceId, &cloned, false, ""); afterUpdate == nil {
+		log.Warnw("attribute-update-failed", log.Fields{"attribute": name, "value": value})
+	}
+	return
+}
diff --git a/rw_core/core/device_manager.go b/rw_core/core/device_manager.go
new file mode 100644
index 0000000..a6c0c8d
--- /dev/null
+++ b/rw_core/core/device_manager.go
@@ -0,0 +1,311 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package core
+
+import (
+	"context"
+	"errors"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/model"
+	"github.com/opencord/voltha-go/kafka"
+	"github.com/opencord/voltha-go/protos/core_adapter"
+	"github.com/opencord/voltha-go/protos/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"reflect"
+	"runtime"
+	"sync"
+)
+
+type DeviceManager struct {
+	deviceAgents        map[string]*DeviceAgent
+	adapterProxy        *AdapterProxy
+	logicalDeviceMgr    *LogicalDeviceManager
+	kafkaProxy          *kafka.KafkaMessagingProxy
+	stateTransitions    *TransitionMap
+	localDataProxy      *model.Proxy
+	exitChannel         chan int
+	lockDeviceAgentsMap sync.RWMutex
+}
+
+func NewDeviceManager(kafkaProxy *kafka.KafkaMessagingProxy, ldProxy *model.Proxy) *DeviceManager {
+	var deviceMgr DeviceManager
+	deviceMgr.exitChannel = make(chan int, 1)
+	deviceMgr.deviceAgents = make(map[string]*DeviceAgent)
+	deviceMgr.adapterProxy = NewAdapterProxy(kafkaProxy)
+	deviceMgr.kafkaProxy = kafkaProxy
+	deviceMgr.localDataProxy = ldProxy
+	deviceMgr.lockDeviceAgentsMap = sync.RWMutex{}
+	return &deviceMgr
+}
+
+func (dMgr *DeviceManager) Start(ctx context.Context, logicalDeviceMgr *LogicalDeviceManager) {
+	log.Info("starting-device-manager")
+	dMgr.logicalDeviceMgr = logicalDeviceMgr
+	dMgr.stateTransitions = NewTransitionMap(dMgr)
+	log.Info("device-manager-started")
+}
+
+func (dMgr *DeviceManager) Stop(ctx context.Context) {
+	log.Info("stopping-device-manager")
+	dMgr.exitChannel <- 1
+	log.Info("device-manager-stopped")
+}
+
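+// sendResponse forwards a result to the waiting channel unless the request context has already been cancelled.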
+func sendResponse(ctx context.Context, ch chan interface{}, result interface{}) {
+	if ctx.Err() == nil {
+		// Return the response only if the ctx has not been cancelled or timed out;
+		// otherwise nothing is reading from the channel any longer
+		ch <- result
+		log.Debugw("sendResponse", log.Fields{"result": result})
+	} else {
+		// Should the transaction be reverted back?
+		log.Debugw("sendResponse-context-error", log.Fields{"context-error": ctx.Err()})
+	}
+}
+
+func (dMgr *DeviceManager) addDeviceAgentToMap(agent *DeviceAgent) {
+	dMgr.lockDeviceAgentsMap.Lock()
+	defer dMgr.lockDeviceAgentsMap.Unlock()
+	if _, exist := dMgr.deviceAgents[agent.deviceId]; !exist {
+		dMgr.deviceAgents[agent.deviceId] = agent
+	}
+}
+
+func (dMgr *DeviceManager) getDeviceAgent(deviceId string) *DeviceAgent {
+	dMgr.lockDeviceAgentsMap.RLock()
+	defer dMgr.lockDeviceAgentsMap.RUnlock()
+	if agent, ok := dMgr.deviceAgents[deviceId]; ok {
+		return agent
+	}
+	return nil
+}
+
+func (dMgr *DeviceManager) createDevice(ctx context.Context, device *voltha.Device, ch chan interface{}) {
+	log.Debugw("createDevice-start", log.Fields{"device": device, "aproxy": dMgr.adapterProxy})
+
+	// Create and start a device agent for that device
+	agent := newDeviceAgent(dMgr.adapterProxy, device, dMgr, dMgr.localDataProxy)
+	dMgr.addDeviceAgentToMap(agent)
+	agent.start(ctx)
+
+	sendResponse(ctx, ch, nil)
+}
+
+func (dMgr *DeviceManager) enableDevice(ctx context.Context, id *voltha.ID, ch chan interface{}) {
+	log.Debugw("enableDevice-start", log.Fields{"deviceid": id})
+
+	var res interface{}
+	if agent := dMgr.getDeviceAgent(id.Id); agent != nil {
+		res = agent.enableDevice(ctx)
+		log.Debugw("EnableDevice-result", log.Fields{"result": res})
+	} else {
+		res = status.Errorf(codes.NotFound, "%s", id.Id)
+	}
+
+	sendResponse(ctx, ch, res)
+}
+
+func (dMgr *DeviceManager) getDevice(id string) (*voltha.Device, error) {
+	log.Debugw("getDevice-start", log.Fields{"deviceid": id})
+
+	if device := dMgr.localDataProxy.Get("/devices/"+id, 1, false, ""); device == nil {
+		return nil, status.Errorf(codes.NotFound, "%s", id)
+	} else {
+		cloned := reflect.ValueOf(device).Elem().Interface().(voltha.Device)
+		return &cloned, nil
+	}
+}
+
+func (dMgr *DeviceManager) ListDevices() (*voltha.Devices, error) {
+	log.Debug("ListDevices-start")
+	result := &voltha.Devices{}
+	dMgr.lockDeviceAgentsMap.RLock()
+	defer dMgr.lockDeviceAgentsMap.RUnlock()
+	for _, agent := range dMgr.deviceAgents {
+		if device := dMgr.localDataProxy.Get("/devices/"+agent.deviceId, 1, false, ""); device != nil {
+			cloned := reflect.ValueOf(device).Elem().Interface().(voltha.Device)
+			result.Items = append(result.Items, &cloned)
+		}
+	}
+	return result, nil
+}
+
+func (dMgr *DeviceManager) updateDevice(device *voltha.Device) error {
+	log.Debugw("updateDevice-start", log.Fields{"deviceid": device.Id, "device": device})
+
+	if agent := dMgr.getDeviceAgent(device.Id); agent != nil {
+		return agent.updateDevice(device)
+	}
+	return status.Errorf(codes.NotFound, "%s", device.Id)
+}
+
+func (dMgr *DeviceManager) addPort(deviceId string, port *voltha.Port) error {
+	if agent := dMgr.getDeviceAgent(deviceId); agent != nil {
+		return agent.addPort(port)
+	}
+	return status.Errorf(codes.NotFound, "%s", deviceId)
+}
+
+func (dMgr *DeviceManager) updatePmConfigs(deviceId string, pmConfigs *voltha.PmConfigs) error {
+	if agent := dMgr.getDeviceAgent(deviceId); agent != nil {
+		return agent.updatePmConfigs(pmConfigs)
+	}
+	return status.Errorf(codes.NotFound, "%s", deviceId)
+}
+
+func (dMgr *DeviceManager) getSwitchCapability(ctx context.Context, deviceId string) (*core_adapter.SwitchCapability, error) {
+	log.Debugw("getSwitchCapability-start", log.Fields{"deviceid": deviceId})
+
+	if agent := dMgr.getDeviceAgent(deviceId); agent != nil {
+		return agent.getSwitchCapability(ctx)
+	}
+	return nil, status.Errorf(codes.NotFound, "%s", deviceId)
+}
+
+func (dMgr *DeviceManager) getNNIPorts(ctx context.Context, deviceId string) (*voltha.Ports, error) {
+	log.Debugw("getNNIPorts-start", log.Fields{"deviceid": deviceId})
+
+	if agent := dMgr.getDeviceAgent(deviceId); agent != nil {
+		return agent.getNNIPorts(ctx), nil
+	}
+	return nil, status.Errorf(codes.NotFound, "%s", deviceId)
+}
+
+func (dMgr *DeviceManager) getPortCapability(ctx context.Context, deviceId string, portNo uint32) (*core_adapter.PortCapability, error) {
+	log.Debugw("getPortCapability-start", log.Fields{"deviceid": deviceId})
+
+	if agent := dMgr.getDeviceAgent(deviceId); agent != nil {
+		return agent.getPortCapability(ctx, portNo)
+	}
+	return nil, status.Errorf(codes.NotFound, "%s", deviceId)
+}
+
+func (dMgr *DeviceManager) updateDeviceState(deviceId string, operState *core_adapter.IntType, connState *core_adapter.IntType) error {
+	log.Debugw("updateDeviceState-start", log.Fields{"deviceid": deviceId, "operState": operState, "connState": connState})
+	if agent := dMgr.getDeviceAgent(deviceId); agent != nil {
+		return agent.updateDeviceState(operState, connState)
+	}
+	return status.Errorf(codes.NotFound, "%s", deviceId)
+}
+
+func (dMgr *DeviceManager) childDeviceDetected(parentDeviceId string, parentPortNo int64, deviceType string, channelId int64) error {
+	log.Debugw("childDeviceDetected-start", log.Fields{"parentDeviceId": parentDeviceId})
+
+	// Create the ONU device
+	childDevice := &voltha.Device{}
+	childDevice.Id = CreateDeviceId()
+	childDevice.Type = deviceType
+	childDevice.ParentId = parentDeviceId
+	childDevice.ParentPortNo = uint32(parentPortNo)
+	childDevice.Root = false
+	childDevice.ProxyAddress = &voltha.Device_ProxyAddress{ChannelId: uint32(channelId)}
+
+	// Create and start a device agent for that device
+	agent := newDeviceAgent(dMgr.adapterProxy, childDevice, dMgr, dMgr.localDataProxy)
+	dMgr.addDeviceAgentToMap(agent)
+	agent.start(nil)
+
+	// Activate the child device
+	if agent := dMgr.getDeviceAgent(childDevice.Id); agent != nil {
+		return agent.enableDevice(nil)
+	}
+
+	return nil
+}
+
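+// processTransition runs the handler, if any, registered for the transition between the previous and current device states.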
+func (dMgr *DeviceManager) processTransition(previous *voltha.Device, current *voltha.Device) error {
+	// This will be triggered on every update to the device.
+	handler := dMgr.stateTransitions.GetTransitionHandler(previous, current)
+	if handler != nil {
+		log.Debugw("found-handler", log.Fields{"handler": funcName(handler)})
+		return handler(previous, current)
+	}
+	log.Debugw("handler-not-found", log.Fields{"deviceId": current.Id})
+	return nil
+}
+
+func (dMgr *DeviceManager) createLogicalDevice(pDevice *voltha.Device, cDevice *voltha.Device) error {
+	log.Info("createLogicalDevice")
+	var logicalId *string
+	var err error
+	if logicalId, err = dMgr.logicalDeviceMgr.CreateLogicalDevice(nil, cDevice); err != nil {
+		log.Warnw("createlogical-device-error", log.Fields{"device": cDevice})
+		return err
+	}
+	// Update the parent device with the logical id
+	dMgr.UpdateDeviceAttribute(cDevice.Id, "ParentId", *logicalId)
+	return nil
+}
+
+func (dMgr *DeviceManager) addUNILogicalPort(pDevice *voltha.Device, cDevice *voltha.Device) error {
+	log.Info("addUNILogicalPort")
+	if err := dMgr.logicalDeviceMgr.AddUNILogicalPort(nil, cDevice); err != nil {
+		log.Warnw("addUNILogicalPort-error", log.Fields{"device": cDevice, "err": err})
+		return err
+	}
+	return nil
+}
+
+func (dMgr *DeviceManager) activateDevice(pDevice *voltha.Device, cDevice *voltha.Device) error {
+	log.Info("activateDevice")
+	return nil
+}
+
+func (dMgr *DeviceManager) disableDevice(pDevice *voltha.Device, cDevice *voltha.Device) error {
+	log.Info("disableDevice")
+	return nil
+}
+
+func (dMgr *DeviceManager) abandonDevice(pDevice *voltha.Device, cDevice *voltha.Device) error {
+	log.Info("abandonDevice")
+	return nil
+}
+
+func (dMgr *DeviceManager) reEnableDevice(pDevice *voltha.Device, cDevice *voltha.Device) error {
+	log.Info("reEnableDevice")
+	return nil
+}
+
+func (dMgr *DeviceManager) noOp(pDevice *voltha.Device, cDevice *voltha.Device) error {
+	log.Info("noOp")
+	return nil
+}
+
+func (dMgr *DeviceManager) notAllowed(pDevice *voltha.Device, cDevice *voltha.Device) error {
+	log.Info("notAllowed")
+	return errors.New("Transition-not-allowed")
+}
+
+func funcName(f interface{}) string {
+	p := reflect.ValueOf(f).Pointer()
+	rf := runtime.FuncForPC(p)
+	return rf.Name()
+}
+
+func (dMgr *DeviceManager) UpdateDeviceAttribute(deviceId string, attribute string, value interface{}) {
+	if agent, ok := dMgr.deviceAgents[deviceId]; ok {
+		agent.updateDeviceAttribute(attribute, value)
+	}
+}
+
+func (dMgr *DeviceManager) GetParentDeviceId(deviceId string) *string {
+	if device, _ := dMgr.getDevice(deviceId); device != nil {
+		log.Infow("GetParentDeviceId", log.Fields{"device": device})
+		return &device.ParentId
+	}
+	return nil
+}
diff --git a/rw_core/core/device_state_transitions.go b/rw_core/core/device_state_transitions.go
new file mode 100644
index 0000000..0f0239c
--- /dev/null
+++ b/rw_core/core/device_state_transitions.go
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package core
+
+import (
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/protos/voltha"
+)
+
+type DeviceType int32
+
+const (
+	parent DeviceType = 0
+	child  DeviceType = 1
+	any    DeviceType = 2
+)
+
+type DeviceState struct {
+	Admin       voltha.AdminState_AdminState
+	Connection  voltha.ConnectStatus_ConnectStatus
+	Operational voltha.OperStatus_OperStatus
+}
+
+type TransitionHandler func(*voltha.Device, *voltha.Device) error
+
+type Transition struct {
+	deviceType    DeviceType
+	previousState DeviceState
+	currentState  DeviceState
+	handler       TransitionHandler
+}
+
+type TransitionMap struct {
+	transitions []Transition
+}
+
+func NewTransitionMap(dMgr *DeviceManager) *TransitionMap {
+	var transitionMap TransitionMap
+	transitionMap.transitions = make([]Transition, 0)
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    any,
+			previousState: DeviceState{Admin: voltha.AdminState_UNKNOWN, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			currentState:  DeviceState{Admin: voltha.AdminState_ENABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			handler:       dMgr.activateDevice})
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    any,
+			previousState: DeviceState{Admin: voltha.AdminState_PREPROVISIONED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			currentState:  DeviceState{Admin: voltha.AdminState_UNKNOWN, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			handler:       dMgr.notAllowed})
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    any,
+			previousState: DeviceState{Admin: voltha.AdminState_PREPROVISIONED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			currentState:  DeviceState{Admin: voltha.AdminState_ENABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			handler:       dMgr.activateDevice})
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    any,
+			previousState: DeviceState{Admin: voltha.AdminState_PREPROVISIONED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			currentState:  DeviceState{Admin: voltha.AdminState_DOWNLOADING_IMAGE, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			handler:       dMgr.notAllowed})
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    any,
+			previousState: DeviceState{Admin: voltha.AdminState_ENABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			currentState:  DeviceState{Admin: voltha.AdminState_UNKNOWN, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			handler:       dMgr.notAllowed})
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    parent,
+			previousState: DeviceState{Admin: voltha.AdminState_ENABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_ACTIVATING},
+			currentState:  DeviceState{Admin: voltha.AdminState_ENABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_ACTIVE},
+			handler:       dMgr.createLogicalDevice})
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    child,
+			previousState: DeviceState{Admin: voltha.AdminState_ENABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_ACTIVATING},
+			currentState:  DeviceState{Admin: voltha.AdminState_ENABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_ACTIVE},
+			handler:       dMgr.addUNILogicalPort})
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    any,
+			previousState: DeviceState{Admin: voltha.AdminState_ENABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			currentState:  DeviceState{Admin: voltha.AdminState_DISABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			handler:       dMgr.disableDevice})
+
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    any,
+			previousState: DeviceState{Admin: voltha.AdminState_ENABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			currentState:  DeviceState{Admin: voltha.AdminState_PREPROVISIONED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			handler:       dMgr.abandonDevice})
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    any,
+			previousState: DeviceState{Admin: voltha.AdminState_DISABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			currentState:  DeviceState{Admin: voltha.AdminState_UNKNOWN, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			handler:       dMgr.notAllowed})
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    any,
+			previousState: DeviceState{Admin: voltha.AdminState_DISABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			currentState:  DeviceState{Admin: voltha.AdminState_PREPROVISIONED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			handler:       dMgr.abandonDevice})
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    any,
+			previousState: DeviceState{Admin: voltha.AdminState_DISABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			currentState:  DeviceState{Admin: voltha.AdminState_ENABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			handler:       dMgr.reEnableDevice})
+
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    any,
+			previousState: DeviceState{Admin: voltha.AdminState_DISABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			currentState:  DeviceState{Admin: voltha.AdminState_DOWNLOADING_IMAGE, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			handler:       dMgr.notAllowed})
+
+	transitionMap.transitions = append(transitionMap.transitions,
+		Transition{
+			deviceType:    any,
+			previousState: DeviceState{Admin: voltha.AdminState_DOWNLOADING_IMAGE, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			currentState:  DeviceState{Admin: voltha.AdminState_DISABLED, Connection: voltha.ConnectStatus_UNKNOWN, Operational: voltha.OperStatus_UNKNOWN},
+			handler:       dMgr.notAllowed})
+
+	return &transitionMap
+}
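+
+// Illustrative examples of the table above: a parent device moving from
+// {ENABLED, UNKNOWN, ACTIVATING} to {ENABLED, UNKNOWN, ACTIVE} resolves to
+// dMgr.createLogicalDevice, while the same state change on a child device
+// resolves to dMgr.addUNILogicalPort.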
+
+func getDeviceStates(device *voltha.Device) *DeviceState {
+	return &DeviceState{Admin: device.AdminState, Connection: device.ConnectStatus, Operational: device.OperStatus}
+}
+
+// getHandler returns the handler matching a state transition, along with a flag
+// indicating whether the match is exact
+func getHandler(previous *DeviceState, current *DeviceState, transition *Transition) (TransitionHandler, bool) {
+
+	// Do we have an exact match?
+	if *previous == transition.previousState && *current == transition.currentState {
+		return transition.handler, true
+	}
+	// If the admin state changed then prioritize it first
+	if previous.Admin != current.Admin {
+		if previous.Admin == transition.previousState.Admin && current.Admin == transition.currentState.Admin {
+			return transition.handler, false
+		}
+	}
+	// If the operational state changed then prioritize it in second position
+	if previous.Operational != current.Operational {
+		if previous.Operational == transition.previousState.Operational && current.Operational == transition.currentState.Operational {
+			return transition.handler, false
+		}
+	}
+	// If the connection state changed then prioritize it in third position
+	if previous.Connection != current.Connection {
+		if previous.Connection == transition.previousState.Connection && current.Connection == transition.currentState.Connection {
+			return transition.handler, false
+		}
+	}
+	return nil, false
+}
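+
+// For example (illustrative): if only the admin state changed, say DISABLED to
+// ENABLED, an entry whose admin states match is returned as a non-exact match;
+// an entry matching all three states exactly takes precedence.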
+
+func (tMap *TransitionMap) GetTransitionHandler(pDevice *voltha.Device, cDevice *voltha.Device) TransitionHandler {
+	//1. Get the previous and current set of states
+	pState := getDeviceStates(pDevice)
+	cState := getDeviceStates(cDevice)
+	log.Infow("DeviceType", log.Fields{"device": pDevice})
+	deviceType := parent
+	if !pDevice.Root {
+		log.Info("device is child")
+		deviceType = child
+	}
+	log.Infof("deviceType:%d-deviceId:%s-previous:%v-current:%v", deviceType, pDevice.Id, pState, cState)
+
+	//2. Go over transition array to get the right transition
+	var currentMatch TransitionHandler
+	var tempHandler TransitionHandler
+	var exactMatch bool
+	var deviceTypeMatch bool
+	for _, aTransition := range tMap.transitions {
+		// consider transition only if it matches deviceType or is a wild card - any
+		if aTransition.deviceType != deviceType && aTransition.deviceType != any {
+			continue
+		}
+		tempHandler, exactMatch = getHandler(pState, cState, &aTransition)
+		if tempHandler != nil {
+			if exactMatch {
+				return tempHandler
+			} else {
+				if currentMatch == nil {
+					currentMatch = tempHandler
+				} else if aTransition.deviceType == deviceType {
+					currentMatch = tempHandler
+					deviceTypeMatch = true
+				} else if !deviceTypeMatch {
+					currentMatch = tempHandler
+				}
+			}
+		}
+	}
+	return currentMatch
+}
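+
+// Usage sketch (assumes a populated DeviceManager; prevDevice and currDevice
+// are *voltha.Device values):
+//
+//	tMap := NewTransitionMap(dMgr)
+//	if handler := tMap.GetTransitionHandler(prevDevice, currDevice); handler != nil {
+//		handler(prevDevice, currDevice)
+//	}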
diff --git a/rw_core/nbi/grpc/api_handler.go b/rw_core/core/grpc_nbi_api_handler.go
similarity index 67%
rename from rw_core/nbi/grpc/api_handler.go
rename to rw_core/core/grpc_nbi_api_handler.go
index e4ebf0c..6af73cd 100644
--- a/rw_core/nbi/grpc/api_handler.go
+++ b/rw_core/core/grpc_nbi_api_handler.go
@@ -13,7 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package grpc
+package core
 
 import (
 	"context"
@@ -21,18 +21,28 @@
 	"github.com/golang/protobuf/ptypes/empty"
 	da "github.com/opencord/voltha-go/common/core/northbound/grpc"
 	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/model"
 	"github.com/opencord/voltha-go/protos/common"
 	"github.com/opencord/voltha-go/protos/openflow_13"
 	"github.com/opencord/voltha-go/protos/voltha"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
 )
 
 type APIHandler struct {
+	deviceMgr        *DeviceManager
+	logicalDeviceMgr *LogicalDeviceManager
+	clusterDataProxy *model.Proxy
+	localDataProxy   *model.Proxy
 	da.DefaultAPIHandler
 }
 
-func NewAPIHandler() *APIHandler {
-	handler := &APIHandler{}
+func NewAPIHandler(deviceMgr *DeviceManager, lDeviceMgr *LogicalDeviceManager, cdProxy *model.Proxy, ldProxy *model.Proxy) *APIHandler {
+	handler := &APIHandler{deviceMgr: deviceMgr,
+		logicalDeviceMgr: lDeviceMgr,
+		clusterDataProxy: cdProxy,
+		localDataProxy:   ldProxy}
 	return handler
 }
 func isTestMode(ctx context.Context) bool {
@@ -45,20 +55,34 @@
 	log.Debugw("UpdateLogLevel-request", log.Fields{"newloglevel": logging.Level, "intval": int(logging.Level)})
 	if isTestMode(ctx) {
 		out := new(empty.Empty)
-		log.SetLoglevel(int(logging.Level))
+		log.SetPackageLogLevel(logging.PackageName, int(logging.Level))
 		return out, nil
 	}
 	return nil, errors.New("Unimplemented")
 
 }
 
+// processEnableDevicePort is a placeholder until port enabling is implemented;
+// it currently reports a canned error on the supplied channel.
+func processEnableDevicePort(ctx context.Context, id *voltha.LogicalPortId, ch chan error) {
+	log.Debugw("processEnableDevicePort", log.Fields{"id": id, "test": common.TestModeKeys_api_test.String()})
+	ch <- status.Errorf(100, "%d-%s", 100, "error")
+}
+
 func (handler *APIHandler) EnableLogicalDevicePort(ctx context.Context, id *voltha.LogicalPortId) (*empty.Empty, error) {
 	log.Debugw("EnableLogicalDevicePort-request", log.Fields{"id": id, "test": common.TestModeKeys_api_test.String()})
 	if isTestMode(ctx) {
 		out := new(empty.Empty)
 		return out, nil
 	}
-	return nil, errors.New("Unimplemented")
+	ch := make(chan error)
+	go processEnableDevicePort(ctx, id, ch)
+	select {
+	case resp := <-ch:
+		close(ch)
+		return new(empty.Empty), resp
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
 }
 
 func (handler *APIHandler) DisableLogicalDevicePort(ctx context.Context, id *voltha.LogicalPortId) (*empty.Empty, error) {
@@ -88,21 +112,79 @@
 	return nil, errors.New("Unimplemented")
 }
 
+// GetDevice must be implemented in the read-only containers - should it also be implemented here?
+func (handler *APIHandler) GetDevice(ctx context.Context, id *voltha.ID) (*voltha.Device, error) {
+	log.Debugw("GetDevice-request", log.Fields{"id": id})
+	return handler.deviceMgr.getDevice(id.Id)
+}
+
+// ListDevices must be implemented in the read-only containers - should it also be implemented here?
+func (handler *APIHandler) ListDevices(ctx context.Context, empty *empty.Empty) (*voltha.Devices, error) {
+	log.Debug("ListDevices")
+	return handler.deviceMgr.ListDevices()
+}
+
+// GetLogicalDevice must be implemented in the read-only containers - should it also be implemented here?
+func (handler *APIHandler) GetLogicalDevice(ctx context.Context, id *voltha.ID) (*voltha.LogicalDevice, error) {
+	log.Debugw("GetLogicalDevice-request", log.Fields{"id": id})
+	return handler.logicalDeviceMgr.getLogicalDevice(id.Id)
+}
+
+// ListLogicalDevices must be implemented in the read-only containers - should it also be implemented here?
+func (handler *APIHandler) ListLogicalDevices(ctx context.Context, empty *empty.Empty) (*voltha.LogicalDevices, error) {
+	log.Debug("ListLogicalDevices")
+	return handler.logicalDeviceMgr.listLogicalDevices()
+}
+
 func (handler *APIHandler) CreateDevice(ctx context.Context, device *voltha.Device) (*voltha.Device, error) {
-	log.Debugw("createdevice-request", log.Fields{"device": *device})
+	log.Debugw("createdevice", log.Fields{"device": *device})
 	if isTestMode(ctx) {
 		return &voltha.Device{Id: device.Id}, nil
 	}
-	return nil, errors.New("Unimplemented")
+	ch := make(chan interface{})
+	defer close(ch)
+	go handler.deviceMgr.createDevice(ctx, device, ch)
+	select {
+	case res := <-ch:
+		if res == nil {
+			return &voltha.Device{Id: device.Id}, nil
+		} else if err, ok := res.(error); ok {
+			return &voltha.Device{Id: device.Id}, err
+		} else {
+			log.Warnw("create-device-unexpected-return-type", log.Fields{"result": res})
+			err = status.Errorf(codes.Internal, "%s", res)
+			return &voltha.Device{Id: device.Id}, err
+		}
+	case <-ctx.Done():
+		log.Debug("createdevice-client-timeout")
+		return nil, ctx.Err()
+	}
 }
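+
+// The channel/select pattern above is reused by the other mutating APIs below:
+// the work runs in a goroutine and the handler returns either the goroutine's
+// result or ctx.Err() if the client deadline expires first.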
 
 func (handler *APIHandler) EnableDevice(ctx context.Context, id *voltha.ID) (*empty.Empty, error) {
-	log.Debugw("enabledevice-request", log.Fields{"id": id})
+	log.Debugw("enabledevice", log.Fields{"id": id})
 	if isTestMode(ctx) {
 		out := new(empty.Empty)
 		return out, nil
 	}
-	return nil, errors.New("Unimplemented")
+	ch := make(chan interface{})
+	defer close(ch)
+	go handler.deviceMgr.enableDevice(ctx, id, ch)
+	select {
+	case res := <-ch:
+		if res == nil {
+			return new(empty.Empty), nil
+		} else if err, ok := res.(error); ok {
+			return new(empty.Empty), err
+		} else {
+			log.Warnw("enable-device-unexpected-return-type", log.Fields{"result": res})
+			err = status.Errorf(codes.Internal, "%s", res)
+			return new(empty.Empty), err
+		}
+	case <-ctx.Done():
+		log.Debug("enabledevice-client-timeout")
+		return nil, ctx.Err()
+	}
 }
 
 func (handler *APIHandler) DisableDevice(ctx context.Context, id *voltha.ID) (*empty.Empty, error) {
diff --git a/rw_core/nbi/grpc/api_handler_client_test.go b/rw_core/core/grpc_nbi_api_handler_client_test.go
similarity index 89%
rename from rw_core/nbi/grpc/api_handler_client_test.go
rename to rw_core/core/grpc_nbi_api_handler_client_test.go
index e2f3188..721271e 100644
--- a/rw_core/nbi/grpc/api_handler_client_test.go
+++ b/rw_core/core/grpc_nbi_api_handler_client_test.go
@@ -13,7 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package grpc
+package core
 
 import (
 	"context"
@@ -24,6 +24,7 @@
 	"github.com/opencord/voltha-go/protos/voltha"
 	"github.com/stretchr/testify/assert"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/status"
 	"os"
@@ -34,9 +35,14 @@
 var stub voltha.VolthaServiceClient
 var testMode string
 
+/*
+NOTE: These tests require the rw_core to be running before they are executed.
+*/
+
 func setup() {
 	var err error
-	if _, err = log.SetLogger(log.JSON, 3, log.Fields{"instanceId": "testing"}); err != nil {
+
+	if err = log.AddPackage(log.JSON, log.WarnLevel, log.Fields{"instanceId": "testing"}); err != nil {
 		log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
 	}
 	conn, err = grpc.Dial("localhost:50057", grpc.WithInsecure())
@@ -55,13 +61,13 @@
 	response, err := stub.GetDevice(ctx, &id)
 	assert.Nil(t, response)
 	st, _ := status.FromError(err)
-	assert.Equal(t, "UnImplemented", st.Message())
-
+	assert.Equal(t, id.Id, st.Message())
+	assert.Equal(t, codes.NotFound, st.Code())
 }
 
 func TestUpdateLogLevelError(t *testing.T) {
 	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs(testMode, "true"))
-	level := voltha.Logging{Level: common.LogLevel_ERROR}
+	level := voltha.Logging{PackageName: "github.com/opencord/voltha-go/rw_core/core", Level: common.LogLevel_ERROR}
 	response, err := stub.UpdateLogLevel(ctx, &level)
 	log.Infow("response", log.Fields{"res": response, "error": err})
 	assert.Equal(t, &empty.Empty{}, response)
@@ -78,14 +84,13 @@
 
 func TestUpdateLogLevelDebug(t *testing.T) {
 	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs(testMode, "true"))
-	level := voltha.Logging{Level: common.LogLevel_DEBUG}
+	level := voltha.Logging{PackageName: "github.com/opencord/voltha-go/rw_core/core", Level: common.LogLevel_DEBUG}
 	response, err := stub.UpdateLogLevel(ctx, &level)
 	log.Infow("response", log.Fields{"res": response, "error": err})
 	assert.Equal(t, &empty.Empty{}, response)
 	assert.Nil(t, err)
 }
 
-
 func TestGetCoreInstance(t *testing.T) {
 	id := &voltha.ID{Id: "getCoreInstance"}
 	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs(testMode, "true"))
@@ -101,7 +106,8 @@
 	response, err := stub.GetLogicalDevice(ctx, id)
 	assert.Nil(t, response)
 	st, _ := status.FromError(err)
-	assert.Equal(t, "UnImplemented", st.Message())
+	assert.Equal(t, id.Id, st.Message())
+	assert.Equal(t, codes.NotFound, st.Code())
 }
 
 func TestGetLogicalDevicePort(t *testing.T) {
@@ -142,10 +148,8 @@
 
 func TestListDevices(t *testing.T) {
 	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs(testMode, "true"))
-	response, err := stub.ListDevices(ctx, &empty.Empty{})
-	assert.Nil(t, response)
-	st, _ := status.FromError(err)
-	assert.Equal(t, "UnImplemented", st.Message())
+	response, _ := stub.ListDevices(ctx, &empty.Empty{})
+	assert.Equal(t, len(response.Items), 0)
 }
 
 func TestListAdapters(t *testing.T) {
@@ -158,10 +162,8 @@
 
 func TestListLogicalDevices(t *testing.T) {
 	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs(testMode, "true"))
-	response, err := stub.ListLogicalDevices(ctx, &empty.Empty{})
-	assert.Nil(t, response)
-	st, _ := status.FromError(err)
-	assert.Equal(t, "UnImplemented", st.Message())
+	response, _ := stub.ListLogicalDevices(ctx, &empty.Empty{})
+	assert.Equal(t, len(response.Items), 0)
 }
 
 func TestListCoreInstances(t *testing.T) {
@@ -221,6 +223,9 @@
 	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs(testMode, "true"))
 	id := &voltha.LogicalPortId{Id: "EnableLogicalDevicePort"}
 	response, err := stub.EnableLogicalDevicePort(ctx, id)
+	if e, ok := status.FromError(err); ok {
+		log.Infow("response", log.Fields{"error": err, "errorcode": e.Code(), "msg": e.Message()})
+	}
 	log.Infow("response", log.Fields{"res": response, "error": err})
 	assert.Equal(t, &empty.Empty{}, response)
 	assert.Nil(t, err)
diff --git a/rw_core/core/id.go b/rw_core/core/id.go
new file mode 100644
index 0000000..d5aebd5
--- /dev/null
+++ b/rw_core/core/id.go
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package core
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	m "math/rand"
+)
+
+func randomHex(n int) (string, error) {
+	bytes := make([]byte, n)
+	if _, err := rand.Read(bytes); err != nil {
+		return "", err
+	}
+	return hex.EncodeToString(bytes), nil
+}
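+
+// Note: n is a byte count, so the returned string contains 2*n hex characters
+// (e.g. randomHex(12) yields a 24-character string).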
+
+// CreateDeviceId produces a random device ID (12 random bytes encoded as a
+// 24-character hex string).
+// TODO:  A cluster unique ID may be required
+func CreateDeviceId() string {
+	val, _ := randomHex(12)
+	return val
+}
+
+// CreateLogicalDeviceId is not used for now as the logical device ID is derived from the
+// OLT MAC address
+func CreateLogicalDeviceId() string {
+	// For now just generate a random 24-character hex string (12 random bytes)
+	val, _ := randomHex(12)
+	return val
+}
+
+// CreateLogicalPortId produces a random port ID for a logical device.
+func CreateLogicalPortId() uint32 {
+	//	A logical port is a uint32
+	return m.Uint32()
+}
diff --git a/rw_core/core/logical_device_agent.go b/rw_core/core/logical_device_agent.go
new file mode 100644
index 0000000..5a9562a
--- /dev/null
+++ b/rw_core/core/logical_device_agent.go
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package core
+
+import (
+	"context"
+	"github.com/gogo/protobuf/proto"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/model"
+	ca "github.com/opencord/voltha-go/protos/core_adapter"
+	"github.com/opencord/voltha-go/protos/openflow_13"
+	"github.com/opencord/voltha-go/protos/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"reflect"
+)
+
+type LogicalDeviceAgent struct {
+	logicalDeviceId string
+	lastData        *voltha.LogicalDevice
+	rootDeviceId    string
+	deviceMgr       *DeviceManager
+	ldeviceMgr      *LogicalDeviceManager
+	localDataProxy  *model.Proxy
+	exitChannel     chan int
+}
+
+func NewLogicalDeviceAgent(id string, device *voltha.Device, ldeviceMgr *LogicalDeviceManager, deviceMgr *DeviceManager,
+	ldProxy *model.Proxy) *LogicalDeviceAgent {
+	var agent LogicalDeviceAgent
+	agent.exitChannel = make(chan int, 1)
+	agent.logicalDeviceId = id
+	agent.rootDeviceId = device.Id
+	agent.deviceMgr = deviceMgr
+	agent.localDataProxy = ldProxy
+	agent.ldeviceMgr = ldeviceMgr
+	return &agent
+}
+
+func (agent *LogicalDeviceAgent) Start(ctx context.Context) error {
+	log.Info("starting-logical_device-agent")
+	//Build the logical device based on information retrieved from the device adapter
+	var switchCap *ca.SwitchCapability
+	var err error
+	if switchCap, err = agent.deviceMgr.getSwitchCapability(ctx, agent.rootDeviceId); err != nil {
+		log.Errorw("error-creating-logical-device", log.Fields{"error": err})
+		return err
+	}
+	ld := &voltha.LogicalDevice{Id: agent.logicalDeviceId, RootDeviceId: agent.rootDeviceId}
+	ld.Desc = (proto.Clone(switchCap.Desc)).(*openflow_13.OfpDesc)
+	ld.SwitchFeatures = (proto.Clone(switchCap.SwitchFeatures)).(*openflow_13.OfpSwitchFeatures)
+
+	//Add logical ports to the logical device based on the number of NNI ports discovered
+	//First get the default port capability - TODO: each NNI port may have different capabilities,
+	//hence we may need to extract the port capability by the NNI port id defined by the adapter
+	//during device creation
+	var nniPorts *voltha.Ports
+	if nniPorts, err = agent.deviceMgr.getNNIPorts(ctx, agent.rootDeviceId); err != nil {
+		log.Errorw("error-creating-logical-port", log.Fields{"error": err})
+	}
+	var portCap *ca.PortCapability
+	for _, port := range nniPorts.Items {
+		log.Infow("NNI PORTS", log.Fields{"NNI": port})
+		if portCap, err = agent.deviceMgr.getPortCapability(ctx, agent.rootDeviceId, port.PortNo); err != nil {
+			log.Errorw("error-creating-logical-device", log.Fields{"error": err})
+			return err
+		}
+
+		lp := (proto.Clone(portCap.Port)).(*voltha.LogicalPort)
+		ld.Ports = append(ld.Ports, lp)
+	}
+	// Save the logical device
+	if added := agent.localDataProxy.Add("/logical_devices", ld, ""); added == nil {
+		log.Errorw("failed-to-add-logical-device", log.Fields{"logicaldeviceId": agent.logicalDeviceId})
+	} else {
+		log.Debugw("logicaldevice-created", log.Fields{"logicaldeviceId": agent.logicalDeviceId})
+	}
+
+	return nil
+}
+
+func (agent *LogicalDeviceAgent) addUNILogicalPort(ctx context.Context, childDevice *voltha.Device, portNo uint32) error {
+	log.Info("addUNILogicalPort-start")
+	// Build the logical port based on information retrieved from the device adapter
+	var portCap *ca.PortCapability
+	var err error
+	if portCap, err = agent.deviceMgr.getPortCapability(ctx, childDevice.Id, portNo); err != nil {
+		log.Errorw("error-creating-logical-port", log.Fields{"error": err})
+		return err
+	}
+	// Get stored logical device
+	if ldevice, err := agent.ldeviceMgr.getLogicalDevice(agent.logicalDeviceId); err != nil {
+		return status.Error(codes.NotFound, agent.logicalDeviceId)
+	} else {
+		cloned := reflect.ValueOf(ldevice).Elem().Interface().(voltha.LogicalDevice)
+		lp := (proto.Clone(portCap.Port)).(*voltha.LogicalPort)
+		cloned.Ports = append(cloned.Ports, lp)
+		afterUpdate := agent.localDataProxy.Update("/logical_devices/"+agent.logicalDeviceId, &cloned, false, "")
+		if afterUpdate == nil {
+			return status.Errorf(codes.Internal, "failed-add-UNI-port:%s", agent.logicalDeviceId)
+		}
+		return nil
+	}
+}
+
+func (agent *LogicalDeviceAgent) Stop(ctx context.Context) {
+	log.Info("stopping-logical_device-agent")
+	agent.exitChannel <- 1
+	log.Info("logical_device-agent-stopped")
+}
+
+func (agent *LogicalDeviceAgent) getLogicalDevice(ctx context.Context) *voltha.LogicalDevice {
+	log.Debug("getLogicalDevice")
+	cp := proto.Clone(agent.lastData)
+	return cp.(*voltha.LogicalDevice)
+}
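+
+// Lifecycle sketch (illustrative, mirroring LogicalDeviceManager.CreateLogicalDevice):
+//
+//	agent := NewLogicalDeviceAgent(id, device, ldMgr, deviceMgr, ldProxy)
+//	go agent.Start(ctx)
+//	...
+//	agent.Stop(ctx)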
diff --git a/rw_core/core/logical_device_manager.go b/rw_core/core/logical_device_manager.go
new file mode 100644
index 0000000..61f96e8
--- /dev/null
+++ b/rw_core/core/logical_device_manager.go
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package core
+
+import (
+	"context"
+	"errors"
+	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-go/db/model"
+	"github.com/opencord/voltha-go/kafka"
+	"github.com/opencord/voltha-go/protos/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+type LogicalDeviceManager struct {
+	logicalDeviceAgents        map[string]*LogicalDeviceAgent
+	deviceMgr                  *DeviceManager
+	adapterProxy               *AdapterProxy
+	kafkaProxy                 *kafka.KafkaMessagingProxy
+	localDataProxy             *model.Proxy
+	exitChannel                chan int
+	lockLogicalDeviceAgentsMap sync.RWMutex
+}
+
+func NewLogicalDeviceManager(deviceMgr *DeviceManager, kafkaProxy *kafka.KafkaMessagingProxy, ldProxy *model.Proxy) *LogicalDeviceManager {
+	var logicalDeviceMgr LogicalDeviceManager
+	logicalDeviceMgr.exitChannel = make(chan int, 1)
+	logicalDeviceMgr.logicalDeviceAgents = make(map[string]*LogicalDeviceAgent)
+	logicalDeviceMgr.deviceMgr = deviceMgr
+	logicalDeviceMgr.kafkaProxy = kafkaProxy
+	logicalDeviceMgr.localDataProxy = ldProxy
+	logicalDeviceMgr.lockLogicalDeviceAgentsMap = sync.RWMutex{}
+	return &logicalDeviceMgr
+}
+
+func (ldMgr *LogicalDeviceManager) Start(ctx context.Context) {
+	log.Info("starting-logical-device-manager")
+	log.Info("logical-device-manager-started")
+}
+
+func (ldMgr *LogicalDeviceManager) Stop(ctx context.Context) {
+	log.Info("stopping-logical-device-manager")
+	ldMgr.exitChannel <- 1
+	log.Info("logical-device-manager-stopped")
+}
+
+func (ldMgr *LogicalDeviceManager) addLogicalDeviceAgentToMap(agent *LogicalDeviceAgent) {
+	ldMgr.lockLogicalDeviceAgentsMap.Lock()
+	defer ldMgr.lockLogicalDeviceAgentsMap.Unlock()
+	if _, exist := ldMgr.logicalDeviceAgents[agent.logicalDeviceId]; !exist {
+		ldMgr.logicalDeviceAgents[agent.logicalDeviceId] = agent
+	}
+}
+
+func (ldMgr *LogicalDeviceManager) getLogicalDeviceAgent(logicalDeviceId string) *LogicalDeviceAgent {
+	ldMgr.lockLogicalDeviceAgentsMap.RLock()
+	defer ldMgr.lockLogicalDeviceAgentsMap.RUnlock()
+	if agent, ok := ldMgr.logicalDeviceAgents[logicalDeviceId]; ok {
+		return agent
+	}
+	return nil
+}
+
+func (ldMgr *LogicalDeviceManager) getLogicalDevice(id string) (*voltha.LogicalDevice, error) {
+	log.Debugw("getlogicalDevice-start", log.Fields{"logicaldeviceid": id})
+	logicalDevice := ldMgr.localDataProxy.Get("/logical_devices/"+id, 1, false, "")
+	if logicalDevice != nil {
+		cloned := reflect.ValueOf(logicalDevice).Elem().Interface().(voltha.LogicalDevice)
+		return &cloned, nil
+	}
+	return nil, status.Errorf(codes.NotFound, "%s", id)
+}
+
+func (ldMgr *LogicalDeviceManager) listLogicalDevices() (*voltha.LogicalDevices, error) {
+	log.Debug("listLogicalDevices-start")
+	result := &voltha.LogicalDevices{}
+	ldMgr.lockLogicalDeviceAgentsMap.RLock()
+	defer ldMgr.lockLogicalDeviceAgentsMap.RUnlock()
+	for _, agent := range ldMgr.logicalDeviceAgents {
+		logicalDevice := ldMgr.localDataProxy.Get("/logical_devices/"+agent.logicalDeviceId, 1, false, "")
+		if logicalDevice != nil {
+			cloned := reflect.ValueOf(logicalDevice).Elem().Interface().(voltha.LogicalDevice)
+			result.Items = append(result.Items, &cloned)
+		}
+	}
+	return result, nil
+}
+
+func (ldMgr *LogicalDeviceManager) CreateLogicalDevice(ctx context.Context, device *voltha.Device) (*string, error) {
+	log.Infow("creating-logical-device-start", log.Fields{"deviceId": device.Id})
+	// Sanity check
+	if !device.Root {
+		return nil, errors.New("Device-not-root")
+	}
+
+	// Create a logical device agent - the logical device id is based on the mac address of the device.
+	// The mac address is part of a oneof in the Device model and may need to be moved out of it.
+	macAddress := device.MacAddress
+	id := strings.Replace(macAddress, ":", "", -1)
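+	// e.g. a MacAddress of "00:0c:e2:31:40:00" yields the id "000ce2314000"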
+	log.Debugw("setting-logical-device-id", log.Fields{"logicaldeviceId": id})
+
+	agent := NewLogicalDeviceAgent(id, device, ldMgr, ldMgr.deviceMgr, ldMgr.localDataProxy)
+	ldMgr.addLogicalDeviceAgentToMap(agent)
+	go agent.Start(ctx)
+
+	log.Info("creating-logical-device-ends")
+	return &id, nil
+}
+
+func (ldMgr *LogicalDeviceManager) AddUNILogicalPort(ctx context.Context, childDevice *voltha.Device) error {
+	log.Infow("AddUNILogicalPort-start", log.Fields{"deviceId": childDevice.Id})
+	// Sanity check
+	if childDevice.Root {
+		return errors.New("Device-root")
+	}
+
+	// Get the logical device id from the parent device
+	parentId := childDevice.ParentId
+	logDeviceId := ldMgr.deviceMgr.GetParentDeviceId(parentId)
+	if logDeviceId == nil {
+		return status.Errorf(codes.NotFound, "parent-device:%s", parentId)
+	}
+
+	log.Infow("AddUNILogicalPort", log.Fields{"logDeviceId": logDeviceId, "parentId": parentId})
+
+	if agent := ldMgr.getLogicalDeviceAgent(*logDeviceId); agent != nil {
+		return agent.addUNILogicalPort(ctx, childDevice, childDevice.ProxyAddress.ChannelId)
+	}
+	return status.Errorf(codes.NotFound, "%s", childDevice.Id)
+}
diff --git a/rw_core/core/requestHandlerProxy.go b/rw_core/core/requestHandlerProxy.go
deleted file mode 100644
index 1582906..0000000
--- a/rw_core/core/requestHandlerProxy.go
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package core
-
-import (
-	"errors"
-	"github.com/golang/protobuf/ptypes"
-	"github.com/opencord/voltha-go/common/log"
-	ca "github.com/opencord/voltha-go/protos/core_adapter"
-	"github.com/opencord/voltha-go/protos/voltha"
-)
-
-type RequestHandlerProxy struct {
-	TestMode bool
-}
-
-func (rhp *RequestHandlerProxy) GetDevice(args []*ca.Argument) (error, *voltha.Device) {
-	if len(args) != 1 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return err, nil
-	}
-	pID := &ca.StrType{}
-	if err := ptypes.UnmarshalAny(args[0].Value, pID); err != nil {
-		log.Warnw("cannot-unmarshal-argument", log.Fields{"error": err})
-		return err, nil
-	}
-	log.Debugw("GetDevice", log.Fields{"deviceId": pID.Val})
-	// TODO process the request
-
-	if rhp.TestMode { // Execute only for test cases
-		return nil, &voltha.Device{Id: pID.Val}
-	}
-	return nil, nil
-}
-
-func (rhp *RequestHandlerProxy) GetChildDevice(args []*ca.Argument) (error, *voltha.Device) {
-	if len(args) < 1 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return err, nil
-	}
-	pID := &ca.StrType{}
-	if err := ptypes.UnmarshalAny(args[0].Value, pID); err != nil {
-		log.Warnw("cannot-unmarshal-argument", log.Fields{"error": err})
-		return err, nil
-	}
-	// TODO decompose the other parameteres for matching criteria and process
-	log.Debugw("GetChildDevice", log.Fields{"deviceId": pID.Val})
-
-	if rhp.TestMode { // Execute only for test cases
-		return nil, &voltha.Device{Id: pID.Val}
-	}
-	return nil, nil
-}
-
-func (rhp *RequestHandlerProxy) GetPorts(args []*ca.Argument) (error, *voltha.Ports) {
-	if len(args) != 2 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return err, nil
-	}
-	pID := &ca.StrType{}
-	if err := ptypes.UnmarshalAny(args[0].Value, pID); err != nil {
-		log.Warnw("cannot-unmarshal-argument", log.Fields{"error": err})
-		return err, nil
-	}
-	// Porttype is an enum sent as an integer proto
-	pt := &ca.IntType{}
-	if err := ptypes.UnmarshalAny(args[1].Value, pt); err != nil {
-		log.Warnw("cannot-unmarshal-argument", log.Fields{"error": err})
-		return err, nil
-	}
-
-	// TODO decompose the other parameteres for matching criteria
-	log.Debugw("GetPorts", log.Fields{"deviceID": pID.Val, "portype": pt.Val})
-
-	if rhp.TestMode { // Execute only for test cases
-		aPort := &voltha.Port{Label: "test_port"}
-		allPorts := &voltha.Ports{}
-		allPorts.Items = append(allPorts.Items, aPort)
-		return nil, allPorts
-	}
-	return nil, nil
-
-}
-
-func (rhp *RequestHandlerProxy) GetChildDevices(args []*ca.Argument) (error, *voltha.Device) {
-	if len(args) != 1 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return err, nil
-	}
-	pID := &ca.StrType{}
-	if err := ptypes.UnmarshalAny(args[0].Value, pID); err != nil {
-		log.Warnw("cannot-unmarshal-argument", log.Fields{"error": err})
-		return err, nil
-	}
-	// TODO decompose the other parameteres for matching criteria and process
-	log.Debugw("GetChildDevice", log.Fields{"deviceId": pID.Val})
-
-	if rhp.TestMode { // Execute only for test cases
-		return nil, &voltha.Device{Id: pID.Val}
-	}
-	return nil, nil
-}
-
-// ChildDeviceDetected is invoked when a child device is detected.  The following
-// parameters are expected:
-// {parent_device_id, parent_port_no, child_device_type, proxy_address, admin_state, **kw)
-func (rhp *RequestHandlerProxy) ChildDeviceDetected(args []*ca.Argument) error {
-	if len(args) < 5 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
-		err := errors.New("invalid-number-of-args")
-		return err
-	}
-
-	pID := &ca.StrType{}
-	if err := ptypes.UnmarshalAny(args[0].Value, pID); err != nil {
-		log.Warnw("cannot-unmarshal-argument", log.Fields{"error": err})
-		return err
-	}
-	portNo := &ca.IntType{}
-	if err := ptypes.UnmarshalAny(args[1].Value, portNo); err != nil {
-		log.Warnw("cannot-unmarshal-argument", log.Fields{"error": err})
-		return err
-	}
-	dt := &ca.StrType{}
-	if err := ptypes.UnmarshalAny(args[2].Value, dt); err != nil {
-		log.Warnw("cannot-unmarshal-argument", log.Fields{"error": err})
-		return err
-	}
-	pAddr := &voltha.Device_ProxyAddress{}
-	if err := ptypes.UnmarshalAny(args[3].Value, pAddr); err != nil {
-		log.Warnw("cannot-unmarshal-argument", log.Fields{"error": err})
-		return err
-	}
-	adminState := &ca.IntType{}
-	if err := ptypes.UnmarshalAny(args[4].Value, adminState); err != nil {
-		log.Warnw("cannot-unmarshal-argument", log.Fields{"error": err})
-		return err
-	}
-
-	// Need to decode the other params - in this case the key will represent the proto type
-	// TODO decompose the other parameteres for matching criteria and process
-	log.Debugw("ChildDeviceDetected", log.Fields{"deviceId": pID.Val, "portNo": portNo.Val,
-		"deviceType": dt.Val, "proxyAddress": pAddr, "adminState": adminState})
-
-	if rhp.TestMode { // Execute only for test cases
-		return nil
-	}
-	return nil
-}
diff --git a/rw_core/main.go b/rw_core/main.go
index f495aeb..b73f131 100644
--- a/rw_core/main.go
+++ b/rw_core/main.go
@@ -22,12 +22,9 @@
 	grpcserver "github.com/opencord/voltha-go/common/grpc"
 	"github.com/opencord/voltha-go/common/log"
 	"github.com/opencord/voltha-go/db/kvstore"
-	"github.com/opencord/voltha-go/kafka"
 	ca "github.com/opencord/voltha-go/protos/core_adapter"
-	"github.com/opencord/voltha-go/protos/voltha"
 	"github.com/opencord/voltha-go/rw_core/config"
-	grpcapi "github.com/opencord/voltha-go/rw_core/nbi/grpc"
-	"google.golang.org/grpc"
+	c "github.com/opencord/voltha-go/rw_core/core"
 	"os"
 	"os/signal"
 	"strconv"
@@ -40,12 +37,17 @@
 	config      *config.RWCoreFlags
 	halted      bool
 	exitChannel chan int
-	kmp         *kafka.KafkaMessagingProxy
-	grpcServer  *grpcserver.GrpcServer
+	//kmp         *kafka.KafkaMessagingProxy
+	grpcServer *grpcserver.GrpcServer
+	core       *c.Core
 	//For test
 	receiverChannels []<-chan *ca.InterContainerMessage
 }
 
+func init() {
+	log.AddPackage(log.JSON, log.WarnLevel, nil)
+}
+
 func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
 
 	log.Infow("kv-store-type", log.Fields{"store": storeType})
@@ -67,14 +69,14 @@
 	return &rwCore
 }
 
-func (core *rwCore) setKVClient() error {
-	addr := core.config.KVStoreHost + ":" + strconv.Itoa(core.config.KVStorePort)
-	client, err := newKVClient(core.config.KVStoreType, addr, core.config.KVStoreTimeout)
+func (rw *rwCore) setKVClient() error {
+	addr := rw.config.KVStoreHost + ":" + strconv.Itoa(rw.config.KVStorePort)
+	client, err := newKVClient(rw.config.KVStoreType, addr, rw.config.KVStoreTimeout)
 	if err != nil {
 		log.Error(err)
 		return err
 	}
-	core.kvClient = client
+	rw.kvClient = client
 	return nil
 }
 
@@ -89,70 +91,75 @@
 	}
 }
 
-func (core *rwCore) startGRPCService(context.Context) {
-	//	create an insecure gserver server
-	core.grpcServer = grpcserver.NewGrpcServer(core.config.GrpcHost, core.config.GrpcPort, nil, false)
-	log.Info("server created")
-	//
-	//	Create a function to register the core GRPC service with the GRPC server
-	f := func(gs *grpc.Server) {
-		voltha.RegisterVolthaServiceServer(
-			gs,
-			grpcapi.NewAPIHandler(),
-		)
-	}
+//func (rw *rwCore) createGRPCService(context.Context) {
+//	//	create an insecure gserver server
+//	rw.grpcServer = grpcserver.NewGrpcServer(rw.config.GrpcHost, rw.config.GrpcPort, nil, false)
+//	log.Info("grpc-server-created")
+//}
 
-	core.grpcServer.AddService(f)
-	log.Info("service add")
+//func (rw *rwCore) startKafkaMessagingProxy(ctx context.Context) error {
+//	log.Infow("starting-kafka-messaging-proxy", log.Fields{"host":rw.config.KafkaAdapterHost,
+//	"port":rw.config.KafkaAdapterPort, "topic":rw.config.CoreTopic})
+//	var err error
+//	if rw.kmp, err = kafka.NewKafkaMessagingProxy(
+//		kafka.KafkaHost(rw.config.KafkaAdapterHost),
+//		kafka.KafkaPort(rw.config.KafkaAdapterPort),
+//		kafka.DefaultTopic(&kafka.Topic{Name: rw.config.CoreTopic})); err != nil {
+//		log.Errorw("fail-to-create-kafka-proxy", log.Fields{"error": err})
+//		return err
+//	}
+//	if err = rw.kmp.Start(); err != nil {
+//		log.Fatalw("error-starting-messaging-proxy", log.Fields{"error": err})
+//		return err
+//	}
+//
+//	requestProxy := &c.RequestHandlerProxy{}
+//	rw.kmp.SubscribeWithTarget(kafka.Topic{Name: rw.config.CoreTopic}, requestProxy)
+//
+//	log.Info("started-kafka-messaging-proxy")
+//	return nil
+//}
 
-	//	Start the server
-	core.grpcServer.Start(context.Background())
-	log.Info("server started")
-}
-
-
-func (core *rwCore) start(ctx context.Context) {
+func (rw *rwCore) start(ctx context.Context) {
 	log.Info("Starting RW Core components")
-	// Setup GRPC Server
-	go core.startGRPCService(ctx)
+
+	//// Setup GRPC Server
+	//rw.createGRPCService(ctx)
+
+	//// Setup Kafka messaging services
+	//if err := rw.startKafkaMessagingProxy(ctx); err != nil {
+	//	log.Fatalw("failed-to-start-kafka-proxy", log.Fields{"err":err})
+	//}
+
+	// Create the core service
+	rw.core = c.NewCore(rw.config.InstanceID, rw.config)
+
+	// start the core
+	rw.core.Start(ctx)
 
 	// Setup KV Client
-
-	// Setup Kafka messaging services
-	var err error
-	if core.kmp, err = kafka.NewKafkaMessagingProxy(
-		kafka.KafkaHost("10.100.198.220"),
-		kafka.KafkaPort(9092),
-		kafka.DefaultTopic(&kafka.Topic{Name: "Adapter"})); err != nil {
-		log.Errorw("fail-to-create-kafka-proxy", log.Fields{"error": err})
-		return
-	}
-	// Start the kafka messaging service - synchronous call to ensure
-	if err = core.kmp.Start(); err != nil {
-		log.Fatalw("error-starting-messaging-proxy", log.Fields{"error": err})
-	}
 }
 
-func (core *rwCore) stop() {
+func (rw *rwCore) stop() {
 	// Stop leadership tracking
-	core.halted = true
+	rw.halted = true
 
-	// Stop the Kafka messaging service
-	if core.kmp != nil {
-		core.kmp.Stop()
-	}
+	//// Stop the Kafka messaging service
+	//if rw.kmp != nil {
+	//	rw.kmp.Stop()
+	//}
 
 	// send exit signal
-	core.exitChannel <- 0
+	rw.exitChannel <- 0
 
 	// Cleanup - applies only if we had a kvClient
-	if core.kvClient != nil {
+	if rw.kvClient != nil {
 		// Release all reservations
-		if err := core.kvClient.ReleaseAllReservations(); err != nil {
+		if err := rw.kvClient.ReleaseAllReservations(); err != nil {
 			log.Infow("fail-to-release-all-reservations", log.Fields{"error": err})
 		}
 		// Close the DB connection
-		core.kvClient.Close()
+		rw.kvClient.Close()
 	}
 }
 
@@ -201,10 +208,21 @@
 	cf := config.NewRWCoreFlags()
 	cf.ParseCommandArguments()
 
-	// Setup logging
-	if _, err := log.SetLogger(log.JSON, cf.LogLevel, log.Fields{"instanceId": cf.InstanceID}); err != nil {
+	// Setup logging
+
+	// Setup default logger - applies to packages that do not have a specific logger set
+	if _, err := log.SetDefaultLogger(log.JSON, cf.LogLevel, log.Fields{"instanceId": cf.InstanceID}); err != nil {
 		log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
 	}
+
+	// Update all loggers (provisioned via init) with a common field
+	if err := log.UpdateAllLoggers(log.Fields{"instanceId": cf.InstanceID}); err != nil {
+		log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
+	}
+
+	log.SetPackageLogLevel("github.com/opencord/voltha-go/rw_core/core", log.DebugLevel)
+	log.SetPackageLogLevel("github.com/opencord/voltha-go/kafka", log.DebugLevel)
+
 	defer log.CleanUp()
 
 	// Print banner if specified
@@ -217,15 +235,15 @@
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
-	core := newRWCore(cf)
-	go core.start(ctx)
+	rw := newRWCore(cf)
+	go rw.start(ctx)
 
 	code := waitForExit()
 	log.Infow("received-a-closing-signal", log.Fields{"code": code})
 
 	// Cleanup before leaving
-	core.stop()
+	rw.stop()
 
 	elapsed := time.Since(start)
-	log.Infow("rw-core-run-time", log.Fields{"core": core.config.InstanceID, "time": elapsed / time.Second})
+	log.Infow("rw-core-run-time", log.Fields{"core": rw.config.InstanceID, "time": elapsed / time.Second})
 }
diff --git a/tests/kafka/kafka_inter_container_messaging_test.go b/tests/kafka/kafka_inter_container_messaging_test.go
index a1865c7..1f60cb1 100644
--- a/tests/kafka/kafka_inter_container_messaging_test.go
+++ b/tests/kafka/kafka_inter_container_messaging_test.go
@@ -37,12 +37,12 @@
 		log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
 	}
 	coreKafkaProxy, _ = kk.NewKafkaMessagingProxy(
-		kk.KafkaHost("10.100.198.220"),
+		kk.KafkaHost("10.176.215.107"),
 		kk.KafkaPort(9092),
 		kk.DefaultTopic(&kk.Topic{Name: "Core"}))
 
 	adapterKafkaProxy, _ = kk.NewKafkaMessagingProxy(
-		kk.KafkaHost("10.100.198.220"),
+		kk.KafkaHost("10.176.215.107"),
 		kk.KafkaPort(9092),
 		kk.DefaultTopic(&kk.Topic{Name: "Adapter"}))