This commit consists of the following:
1) The Kafka messaging proxy, in Twisted Python, for adapters
2) Initial implementation and containerization of the ponsim OLT adapter
and the ponsim ONU adapter
3) Initial submission of the request and response facades in both Twisted
Python and Go
4) Initial implementation of device management and logical device management
in the Core
5) Update to the log module to allow the log level to be set dynamically
per package via the gRPC API
6) Bug fixes and minor changes

Change-Id: Ia8f033da84cfd08275335bae9542802415e7bb0f
diff --git a/adapters/__init__.py b/adapters/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/adapters/__init__.py
diff --git a/adapters/common/__init__.py b/adapters/common/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/common/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/common/event_bus.py b/adapters/common/event_bus.py
new file mode 100644
index 0000000..e717c16
--- /dev/null
+++ b/adapters/common/event_bus.py
@@ -0,0 +1,194 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A simple internal pub/sub event bus with topics and filter-based registration.
+"""
+import re
+
+import structlog
+
+
+log = structlog.get_logger()
+
+
+class _Subscription(object):
+
+    __slots__ = ('bus', 'predicate', 'callback', 'topic')
+    def __init__(self, bus, predicate, callback, topic=None):
+        self.bus = bus
+        self.predicate = predicate
+        self.callback = callback
+        self.topic = topic
+
+
+class EventBus(object):
+
+    def __init__(self):
+        self.subscriptions = {}  # topic -> list of _Subscription objects
+                                 # topic None holds regexp based topic subs.
+        self.subs_topic_map = {} # to aid fast lookup when unsubscribing
+
+    def list_subscribers(self, topic=None):
+        if topic is None:
+            return sum(self.subscriptions.itervalues(), [])
+        else:
+            if topic in self.subscriptions:
+                return self.subscriptions[topic]
+            else:
+                return []
+
+    @staticmethod
+    def _get_topic_key(topic):
+        if isinstance(topic, str):
+            return topic
+        elif hasattr(topic, 'match'):
+            return None
+        else:
+            raise AttributeError('topic is neither a string nor a compiled regex')
+
+    def subscribe(self, topic, callback, predicate=None):
+        """
+        Subscribe to given topic with predicate and register the callback
+        :param topic: String topic (explicit) or regexp based topic filter.
+        :param callback: Callback method with signature def func(topic, msg)
+        :param predicate: Optional method/function signature def predicate(msg)
+        :return: Subscription object which can be used to unsubscribe
+        """
+        subscription = _Subscription(self, predicate, callback, topic)
+        topic_key = self._get_topic_key(topic)
+        self.subscriptions.setdefault(topic_key, []).append(subscription)
+        self.subs_topic_map[subscription] = topic_key
+        return subscription
+
+    def unsubscribe(self, subscription):
+        """
+        Remove given subscription
+        :param subscription: subscription object as was returned by subscribe
+        :return: None
+        """
+        topic_key = self.subs_topic_map[subscription]
+        self.subscriptions[topic_key].remove(subscription)
+
+    def publish(self, topic, msg):
+        """
+        Publish given message to all subscribers registered with topic taking
+        the predicate functions into account.
+        :param topic: String topic
+        :param msg: Arbitrary python data as message
+        :return: None
+        """
+        from copy import copy
+
+        def passes(msg, predicate):
+            try:
+                return predicate(msg)
+            except Exception:
+                return False  # failed predicate function treated as no match
+
+        # lookup explicit-topic subscribers (copied so the stored list is not mutated below)
+        subscribers = list(self.subscriptions.get(topic, []))
+
+        # add matching regexp topic subscribers
+        subscribers.extend(s for s in self.subscriptions.get(None, [])
+                           if s.topic.match(topic))
+
+        # iterate over a shallow-copy of subscribers
+        for candidate in copy(subscribers):
+            predicate = candidate.predicate
+            if predicate is None or passes(msg, predicate):
+                try:
+                    candidate.callback(topic, msg)
+                except Exception as e:
+                    log.exception('callback-failed', e=repr(e), topic=topic)
+
+
+
+default_bus = EventBus()
+
+
+class EventBusClient(object):
+    """
+    Primary interface to the EventBus. Usage:
+
+    Publish:
+    >>> events = EventBusClient()
+    >>> msg = dict(a=1, b='foo')
+    >>> events.publish('a.topic', msg)
+
+    Subscribe to get all messages on specific topic:
+    >>> def got_event(topic, msg):
+    >>>     print topic, ':', msg
+    >>> events = EventBusClient()
+    >>> events.subscribe('a.topic', got_event)
+
+    Subscribe to get messages matching predicate on specific topic:
+    >>> def got_event(topic, msg):
+    >>>     print topic, ':', msg
+    >>> events = EventBusClient()
+    >>> events.subscribe('a.topic', got_event, lambda msg: msg.len() < 100)
+
+    Use a DeferredQueue to buffer incoming messages
+    >>> queue = DeferredQueue()
+    >>> events = EventBusClient()
+    >>> events.subscribe('a.topic', lambda _, msg: queue.put(msg))
+
+    """
+    def __init__(self, bus=None):
+        """
+        Obtain a client interface for the pub/sub event bus.
+        :param bus: An optional specific event bus. Intended mainly for test
+        use. If not provided, the process-wide default bus is used, which is
+        the preferred mode (a process should not need more than one bus).
+        """
+        self.bus = bus or default_bus
+
+    def publish(self, topic, msg):
+        """
+        Publish given msg to given topic.
+        :param topic: String topic
+        :param msg: Arbitrary python data as message
+        :return: None
+        """
+        self.bus.publish(topic, msg)
+
+    def subscribe(self, topic, callback, predicate=None):
+        """
+        Subscribe to given topic with predicate and register the callback
+        :param topic: String topic (explicit) or regexp based topic filter.
+        :param callback: Callback method with signature def func(topic, msg)
+        :param predicate: Optional method/function with signature
+        def predicate(msg)
+        :return: Subscription object which can be used to unsubscribe
+        """
+        return self.bus.subscribe(topic, callback, predicate)
+
+    def unsubscribe(self, subscription):
+        """
+        Remove given subscription
+        :param subscription: subscription object as was returned by subscribe
+        :return: None
+        """
+        return self.bus.unsubscribe(subscription)
+
+    def list_subscribers(self, topic=None):
+        """
+        Return the list of subscribers. If a topic is provided, the list is
+        filtered to those subscribed to that topic.
+        :param topic: Optional topic
+        :return: List of subscriptions
+        """
+        return self.bus.list_subscribers(topic)
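To complement the doctest-style examples above, a minimal, hypothetical sketch of the regexp-based topic subscription that subscribe() accepts; the topic names and handler below are illustrative assumptions, not part of this change:

    import re
    from adapters.common.event_bus import EventBusClient

    events = EventBusClient()

    def on_alarm(topic, msg):
        # one handler covers the whole 'alarms.*' family of topics
        print topic, ':', msg

    sub = events.subscribe(re.compile(r'alarms\..*'), on_alarm)
    events.publish('alarms.olt.los', dict(severity='major'))  # reaches on_alarm
    events.unsubscribe(sub)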
diff --git a/adapters/common/frameio/__init__.py b/adapters/common/frameio/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/common/frameio/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/common/frameio/frameio.py b/adapters/common/frameio/frameio.py
new file mode 100644
index 0000000..2f68ef8
--- /dev/null
+++ b/adapters/common/frameio/frameio.py
@@ -0,0 +1,437 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A module that can send and receive raw Ethernet frames on a set of
+interfaces, and that can manage a set of VLAN interfaces on top of existing
+interfaces. Due to its reliance on raw sockets, this module requires
+root access. Also, because raw sockets are not directly supported by
+Twisted and are hard to deal with there, the receiver select loop runs on
+a dedicated thread.
+"""
+
+import os
+import socket
+import struct
+import uuid
+from pcapy import BPFProgram
+from threading import Thread, Condition
+
+import fcntl
+
+import select
+import structlog
+import sys
+
+from scapy.data import ETH_P_ALL
+from twisted.internet import reactor
+from zope.interface import implementer
+
+from adapters.common.utils.registry import IComponent
+
+if sys.platform.startswith('linux'):
+    from adapters.common.frameio.third_party.oftest import afpacket, netutils
+elif sys.platform == 'darwin':
+    from scapy.arch import pcapdnet, BIOCIMMEDIATE, dnet
+
+log = structlog.get_logger()
+
+
+def hexify(buffer):
+    """
+    Return a hexadecimal string encoding of input buffer
+    """
+    return ''.join('%02x' % ord(c) for c in buffer)
+
+
+class _SelectWakerDescriptor(object):
+    """
+    A descriptor that can be mixed into a select loop to wake it up.
+    """
+    def __init__(self):
+        self.pipe_read, self.pipe_write = os.pipe()
+        fcntl.fcntl(self.pipe_write, fcntl.F_SETFL, os.O_NONBLOCK)
+
+    def __del__(self):
+        os.close(self.pipe_read)
+        os.close(self.pipe_write)
+
+    def fileno(self):
+        return self.pipe_read
+
+    def wait(self):
+        os.read(self.pipe_read, 1)
+
+    def notify(self):
+        """Trigger a select loop"""
+        os.write(self.pipe_write, '\x00')
+
+
+class BpfProgramFilter(object):
+    """
+    Convenience packet filter based on the well-tried Berkeley Packet Filter,
+    used by many well known open source tools such as pcap and tcpdump.
+    """
+    def __init__(self, program_string):
+        """
+        Create a filter using the BPF command syntax. To learn more,
+        consult 'man pcap-filter'.
+        :param program_string: The textual definition of the filter. Examples:
+        'vlan 1000'
+        'vlan 1000 and ip src host 10.10.10.10'
+        """
+        self.bpf = BPFProgram(program_string)
+
+    def __call__(self, frame):
+        """
+        Return 1 if frame passes filter.
+        :param frame: Raw frame provided as Python string
+        :return: 1 if frame satisfies filter, 0 otherwise.
+        """
+        return self.bpf.filter(frame)
+
+
+class FrameIOPort(object):
+    """
+    Represents a network interface on which we can send/receive raw
+    Ethernet frames.
+    """
+
+    RCV_SIZE_DEFAULT = 4096
+    ETH_P_ALL = 0x03
+    RCV_TIMEOUT = 10000
+    MIN_PKT_SIZE = 60
+
+    def __init__(self, iface_name):
+        self.iface_name = iface_name
+        self.proxies = []
+        self.socket = self.open_socket(self.iface_name)
+        log.debug('socket-opened', fn=self.fileno(), iface=iface_name)
+        self.received = 0
+        self.discarded = 0
+
+    def add_proxy(self, proxy):
+        self.proxies.append(proxy)
+
+    def del_proxy(self, proxy):
+        self.proxies = [p for p in self.proxies if p.name != proxy.name]
+
+    def open_socket(self, iface_name):
+        raise NotImplementedError('to be implemented by derived class')
+
+    def rcv_frame(self):
+        raise NotImplementedError('to be implemented by derived class')
+
+    def __del__(self):
+        if self.socket:
+            self.socket.close()
+            self.socket = None
+        log.debug('socket-closed', iface=self.iface_name)
+
+    def fileno(self):
+        return self.socket.fileno()
+
+    def _dispatch(self, proxy, frame):
+        log.debug('calling-publisher', proxy=proxy.name, frame=hexify(frame))
+        try:
+            proxy.callback(proxy, frame)
+        except Exception as e:
+            log.exception('callback-error',
+                          explanation='Callback failed while processing frame',
+                          e=e)
+
+    def recv(self):
+        """Called on the select thread when a packet arrives"""
+        try:
+            frame = self.rcv_frame()
+        except RuntimeError as e:
+            # we have observed this happening sometimes right after the socket
+            # was attached to a newly created veth interface, so we log it and
+            # allow processing to continue.
+            log.warn('afpacket-recv-error', code=-1)
+            return
+
+        log.debug('frame-received', iface=self.iface_name, len=len(frame),
+                  hex=hexify(frame))
+        self.received += 1
+        dispatched = False
+        for proxy in self.proxies:
+            if proxy.filter is None or proxy.filter(frame):
+                log.debug('frame-dispatched')
+                dispatched = True
+                reactor.callFromThread(self._dispatch, proxy, frame)
+
+        if not dispatched:
+            self.discarded += 1
+            log.debug('frame-discarded')
+
+    def send(self, frame):
+        log.debug('sending', len=len(frame), iface=self.iface_name)
+        sent_bytes = self.send_frame(frame)
+        if sent_bytes != len(frame):
+            log.error('send-error', iface=self.iface_name,
+                      wanted_to_send=len(frame), actually_sent=sent_bytes)
+        return sent_bytes
+
+    def send_frame(self, frame):
+        try:
+            return self.socket.send(frame)
+        except socket.error as err:
+            if err[0] == os.errno.EINVAL:
+                if len(frame) < self.MIN_PKT_SIZE:
+                    padding = '\x00' * (self.MIN_PKT_SIZE - len(frame))
+                    frame = frame + padding
+                    return self.socket.send(frame)
+            else:
+                raise
+
+    def up(self):
+        if sys.platform.startswith('darwin'):
+            pass
+        else:
+            os.system('ip link set {} up'.format(self.iface_name))
+        return self
+
+    def down(self):
+        if sys.platform.startswith('darwin'):
+            pass
+        else:
+            os.system('ip link set {} down'.format(self.iface_name))
+        return self
+
+    def statistics(self):
+        return self.received, self.discarded
+
+
+class LinuxFrameIOPort(FrameIOPort):
+
+    def open_socket(self, iface_name):
+        s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, 0)
+        afpacket.enable_auxdata(s)
+        s.bind((self.iface_name, self.ETH_P_ALL))
+        netutils.set_promisc(s, iface_name)
+        s.settimeout(self.RCV_TIMEOUT)
+        return s
+
+    def rcv_frame(self):
+        return afpacket.recv(self.socket, self.RCV_SIZE_DEFAULT)
+
+
+class DarwinFrameIOPort(FrameIOPort):
+
+    def open_socket(self, iface_name):
+        sin = pcapdnet.open_pcap(iface_name, 1600, 1, 100)
+        try:
+            fcntl.ioctl(sin.fileno(), BIOCIMMEDIATE, struct.pack("I",1))
+        except:
+            pass
+
+        # need a different kind of socket for sending out
+        self.sout = dnet.eth(iface_name)
+
+        return sin
+
+    def send_frame(self, frame):
+        return self.sout.send(frame)
+
+    def rcv_frame(self):
+        pkt = self.socket.next()
+        if pkt is not None:
+            ts, pkt = pkt
+        return pkt
+
+
+if sys.platform == 'darwin':
+    _FrameIOPort = DarwinFrameIOPort
+elif sys.platform.startswith('linux'):
+    _FrameIOPort = LinuxFrameIOPort
+else:
+    raise Exception('Unsupported platform {}'.format(sys.platform))
+
+
+class FrameIOPortProxy(object):
+    """Makes FrameIOPort sharable between multiple users"""
+
+    def __init__(self, frame_io_port, callback, filter=None, name=None):
+        self.frame_io_port = frame_io_port
+        self.callback = callback
+        self.filter = filter
+        self.name = uuid.uuid4().hex[:12] if name is None else name
+
+    @property
+    def iface_name(self):
+        return self.frame_io_port.iface_name
+
+    def get_iface_name(self):
+        return self.frame_io_port.iface_name
+
+    def send(self, frame):
+        return self.frame_io_port.send(frame)
+
+    def up(self):
+        self.frame_io_port.up()
+        return self
+
+    def down(self):
+        self.frame_io_port.down()
+        return self
+
+
+@implementer(IComponent)
+class FrameIOManager(Thread):
+    """
+    Packet/Frame IO manager that can be used to send/receive raw frames
+    on a set of network interfaces.
+    """
+    def __init__(self):
+        super(FrameIOManager, self).__init__()
+
+        self.ports = {}  # iface_name -> FrameIOPort
+        self.queue = {}  # iface_name -> TODO
+
+        self.cvar = Condition()
+        self.waker = _SelectWakerDescriptor()
+        self.stopped = False
+        self.ports_changed = False
+
+    # ~~~~~~~~~~~ exposed methods callable from main thread ~~~~~~~~~~~~~~~~~~~
+
+    def start(self):
+        """
+        Start the IO manager and its select loop thread
+        """
+        log.debug('starting')
+        super(FrameIOManager, self).start()
+        log.info('started')
+        return self
+
+    def stop(self):
+        """
+        Stop the IO manager and its thread with the select loop
+        """
+        log.debug('stopping')
+        self.stopped = True
+        self.waker.notify()
+        self.join()
+        del self.ports
+        log.info('stopped')
+
+    def list_interfaces(self):
+        """
+        Return the interfaces listened on
+        :return: dict of iface_name -> FrameIOPort objects
+        """
+        return self.ports
+
+    def open_port(self, iface_name, callback, filter=None, name=None):
+        """
+        Add a new interface and start receiving on it.
+        :param iface_name: Name of the interface. Must be an existing Unix
+        interface (eth0, en0, etc.)
+        :param callback: Called on each received frame;
+        signature: def callback(port, frame) where port is the FrameIOPortProxy
+        instance on which the frame was received and frame is the actual frame
+        received (as a binary string)
+        :param filter: An optional filter (predicate), with signature:
+        def filter(frame). If provided, only frames for which filter evaluates
+        to True will be forwarded to callback.
+        :return: FrameIOPortProxy instance.
+        """
+
+        port = self.ports.get(iface_name)
+        if port is None:
+            port = _FrameIOPort(iface_name)
+            self.ports[iface_name] = port
+            self.ports_changed = True
+            self.waker.notify()
+
+        proxy = FrameIOPortProxy(port, callback, filter, name)
+        port.add_proxy(proxy)
+
+        return proxy
+
+    def close_port(self, proxy):
+        """
+        Remove the proxy. If this is the last proxy on an interface, stop and
+        remove the named interface as well
+        :param proxy: FrameIOPortProxy reference
+        :return: None
+        """
+        assert isinstance(proxy, FrameIOPortProxy)
+        iface_name = proxy.get_iface_name()
+        assert iface_name in self.ports, "iface_name {} unknown".format(iface_name)
+        port = self.ports[iface_name]
+        port.del_proxy(proxy)
+
+        if not port.proxies:
+            del self.ports[iface_name]
+            # need to exit select loop to reconstruct select fd lists
+            self.ports_changed = True
+            self.waker.notify()
+
+    def send(self, iface_name, frame):
+        """
+        Send frame on given interface
+        :param iface_name: Name of previously registered interface
+        :param frame: frame as string
+        :return: number of bytes sent
+        """
+        return self.ports[iface_name].send(frame)
+
+    # ~~~~~~~~~~~~~ Thread methods (running on non-main thread) ~~~~~~~~~~~~~~~
+
+    def run(self):
+        """
+        Called on the dedicated select thread; this is the core multi-port receive loop
+        """
+
+        log.debug('select-loop-started')
+
+        # outer loop constructs sockets list for select
+        while not self.stopped:
+            sockets = [self.waker] + self.ports.values()
+            self.ports_changed = False
+            empty = []
+            # inner select loop
+
+            while not self.stopped:
+                try:
+                    _in, _out, _err = select.select(sockets, empty, empty, 1)
+                except Exception as e:
+                    log.exception('frame-io-select-error', e=e)
+                    break
+                with self.cvar:
+                    for port in _in:
+                        if port is self.waker:
+                            self.waker.wait()
+                            continue
+                        else:
+                            port.recv()
+                    self.cvar.notify_all()
+                if self.ports_changed:
+                    break  # break inner loop so we reconstruct sockets list
+
+        log.debug('select-loop-exited')
+
+    def del_interface(self, iface_name):
+        """
+        Delete the named interface (used when stopping)
+        """
+
+        log.info('deleting-interface', iface=iface_name)
+        del self.ports[iface_name]
+        log.info('interface-deleted', iface=iface_name)
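A hedged usage sketch of the FrameIOManager API documented above; the interface name and BPF expression are assumptions, and running this requires root privileges on a Linux host:

    from twisted.internet import reactor
    from adapters.common.frameio.frameio import FrameIOManager, BpfProgramFilter

    def rcv(port, frame):
        print 'received %d bytes on %s' % (len(frame), port.iface_name)

    mgr = FrameIOManager().start()
    # only frames matching the BPF expression reach the callback
    proxy = mgr.open_port('veth0', rcv, filter=BpfProgramFilter('vlan 1000')).up()
    proxy.send('\x00' * 64)                    # send a raw, padded frame
    reactor.callLater(10, mgr.close_port, proxy)
    reactor.callLater(11, mgr.stop)
    reactor.run()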
diff --git a/adapters/common/frameio/third_party/__init__.py b/adapters/common/frameio/third_party/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/common/frameio/third_party/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/common/frameio/third_party/oftest/LICENSE b/adapters/common/frameio/third_party/oftest/LICENSE
new file mode 100644
index 0000000..3216042
--- /dev/null
+++ b/adapters/common/frameio/third_party/oftest/LICENSE
@@ -0,0 +1,36 @@
+OpenFlow Test Framework
+
+Copyright (c) 2010 The Board of Trustees of The Leland Stanford
+Junior University
+
+Except where otherwise noted, this software is distributed under
+the OpenFlow Software License.  See
+http://www.openflowswitch.org/wp/legal/ for current details.
+
+We are making the OpenFlow specification and associated documentation
+(Software) available for public use and benefit with the expectation
+that others will use, modify and enhance the Software and contribute
+those enhancements back to the community. However, since we would like
+to make the Software available for broadest use, with as few
+restrictions as possible permission is hereby granted, free of charge,
+to any person obtaining a copy of this Software to deal in the
+Software under the copyrights without restriction, including without
+limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+The name and trademarks of copyright holder(s) may NOT be used in
+advertising or publicity pertaining to the Software or any derivatives
+without specific, written prior permission.
diff --git a/adapters/common/frameio/third_party/oftest/README.md b/adapters/common/frameio/third_party/oftest/README.md
new file mode 100644
index 0000000..f0cb649
--- /dev/null
+++ b/adapters/common/frameio/third_party/oftest/README.md
@@ -0,0 +1,6 @@
+Files in this directory are derived from the respective files
+in oftest (http://github.com/floodlight/oftest).
+ 
+For the licensing terms of these files, see LICENSE in this dir.
+ 
+
diff --git a/adapters/common/frameio/third_party/oftest/__init__.py b/adapters/common/frameio/third_party/oftest/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/common/frameio/third_party/oftest/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/common/frameio/third_party/oftest/afpacket.py b/adapters/common/frameio/third_party/oftest/afpacket.py
new file mode 100644
index 0000000..9ae8075
--- /dev/null
+++ b/adapters/common/frameio/third_party/oftest/afpacket.py
@@ -0,0 +1,124 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+AF_PACKET receive support
+
+When VLAN offload is enabled on the NIC Linux will not deliver the VLAN tag
+in the data returned by recv. Instead, it delivers the VLAN TCI in a control
+message. Python 2.x doesn't have built-in support for recvmsg, so we have to
+use ctypes to call it. The recv function exported by this module reconstructs
+the VLAN tag if it was offloaded.
+"""
+
+import struct
+from ctypes import *
+
+ETH_P_8021Q = 0x8100
+SOL_PACKET = 263
+PACKET_AUXDATA = 8
+TP_STATUS_VLAN_VALID = 1 << 4
+
+class struct_iovec(Structure):
+    _fields_ = [
+        ("iov_base", c_void_p),
+        ("iov_len", c_size_t),
+    ]
+
+class struct_msghdr(Structure):
+    _fields_ = [
+        ("msg_name", c_void_p),
+        ("msg_namelen", c_uint32),
+        ("msg_iov", POINTER(struct_iovec)),
+        ("msg_iovlen", c_size_t),
+        ("msg_control", c_void_p),
+        ("msg_controllen", c_size_t),
+        ("msg_flags", c_int),
+    ]
+
+class struct_cmsghdr(Structure):
+    _fields_ = [
+        ("cmsg_len", c_size_t),
+        ("cmsg_level", c_int),
+        ("cmsg_type", c_int),
+    ]
+
+class struct_tpacket_auxdata(Structure):
+    _fields_ = [
+        ("tp_status", c_uint),
+        ("tp_len", c_uint),
+        ("tp_snaplen", c_uint),
+        ("tp_mac", c_ushort),
+        ("tp_net", c_ushort),
+        ("tp_vlan_tci", c_ushort),
+        ("tp_padding", c_ushort),
+    ]
+
+libc = CDLL("libc.so.6")
+recvmsg = libc.recvmsg
+recvmsg.argtypes = [c_int, POINTER(struct_msghdr), c_int]
+recvmsg.restype = c_int
+
+def enable_auxdata(sk):
+    """
+    Ask the kernel to return the VLAN tag in a control message
+
+    Must be called on the socket before afpacket.recv.
+    """
+    sk.setsockopt(SOL_PACKET, PACKET_AUXDATA, 1)
+
+def recv(sk, bufsize):
+    """
+    Receive a packet from an AF_PACKET socket
+    @sk Socket
+    @bufsize Maximum packet size
+    """
+    buf = create_string_buffer(bufsize)
+
+    ctrl_bufsize = sizeof(struct_cmsghdr) + sizeof(struct_tpacket_auxdata) + sizeof(c_size_t)
+    ctrl_buf = create_string_buffer(ctrl_bufsize)
+
+    iov = struct_iovec()
+    iov.iov_base = cast(buf, c_void_p)
+    iov.iov_len = bufsize
+
+    msghdr = struct_msghdr()
+    msghdr.msg_name = None
+    msghdr.msg_namelen = 0
+    msghdr.msg_iov = pointer(iov)
+    msghdr.msg_iovlen = 1
+    msghdr.msg_control = cast(ctrl_buf, c_void_p)
+    msghdr.msg_controllen = ctrl_bufsize
+    msghdr.msg_flags = 0
+
+    rv = recvmsg(sk.fileno(), byref(msghdr), 0)
+    if rv < 0:
+        raise RuntimeError("recvmsg failed: rv=%d", rv)
+
+    # The kernel only delivers control messages we ask for. We
+    # only enabled PACKET_AUXDATA, so we can assume it's the
+    # only control message.
+    assert msghdr.msg_controllen >= sizeof(struct_cmsghdr)
+
+    cmsghdr = struct_cmsghdr.from_buffer(ctrl_buf) # pylint: disable=E1101
+    assert cmsghdr.cmsg_level == SOL_PACKET
+    assert cmsghdr.cmsg_type == PACKET_AUXDATA
+
+    auxdata = struct_tpacket_auxdata.from_buffer(ctrl_buf, sizeof(struct_cmsghdr)) # pylint: disable=E1101
+
+    if auxdata.tp_vlan_tci != 0 or auxdata.tp_status & TP_STATUS_VLAN_VALID:
+        # Insert VLAN tag
+        tag = struct.pack("!HH", ETH_P_8021Q, auxdata.tp_vlan_tci)
+        return buf.raw[:12] + tag + buf.raw[12:rv]
+    else:
+        return buf.raw[:rv]
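A minimal sketch, assuming a Linux host with root privileges and an existing interface named eth0, of how enable_auxdata() and recv() fit together:

    import socket
    from adapters.common.frameio.third_party.oftest import afpacket

    ETH_P_ALL = 0x03
    s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, 0)
    s.bind(('eth0', ETH_P_ALL))
    afpacket.enable_auxdata(s)      # ask the kernel for the VLAN TCI control message
    frame = afpacket.recv(s, 4096)  # blocks; returns the frame with any VLAN tag re-inserted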
diff --git a/adapters/common/frameio/third_party/oftest/netutils.py b/adapters/common/frameio/third_party/oftest/netutils.py
new file mode 100644
index 0000000..092d490
--- /dev/null
+++ b/adapters/common/frameio/third_party/oftest/netutils.py
@@ -0,0 +1,73 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Network utilities for the OpenFlow test framework
+"""
+
+###########################################################################
+##                                                                         ##
+## Promiscuous mode enable/disable                                         ##
+##                                                                         ##
+## Based on code from Scapy by Phillippe Biondi                            ##
+##                                                                         ##
+##                                                                         ##
+## This program is free software; you can redistribute it and/or modify it ##
+## under the terms of the GNU General Public License as published by the   ##
+## Free Software Foundation; either version 2, or (at your option) any     ##
+## later version.                                                          ##
+##                                                                         ##
+## This program is distributed in the hope that it will be useful, but     ##
+## WITHOUT ANY WARRANTY; without even the implied warranty of              ##
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU       ##
+## General Public License for more details.                                ##
+##                                                                         ##
+#############################################################################
+
+import socket
+from fcntl import ioctl
+import struct
+
+# From net/if_arp.h
+ARPHDR_ETHER = 1
+ARPHDR_LOOPBACK = 772
+
+# From bits/ioctls.h
+SIOCGIFHWADDR  = 0x8927          # Get hardware address
+SIOCGIFINDEX   = 0x8933          # name -> if_index mapping
+
+# From netpacket/packet.h
+PACKET_ADD_MEMBERSHIP  = 1
+PACKET_DROP_MEMBERSHIP = 2
+PACKET_MR_PROMISC      = 1
+
+# From bits/socket.h
+SOL_PACKET = 263
+
+def get_if(iff,cmd):
+  s=socket.socket()
+  ifreq = ioctl(s, cmd, struct.pack("16s16x",iff))
+  s.close()
+  return ifreq
+
+def get_if_index(iff):
+  return int(struct.unpack("I",get_if(iff, SIOCGIFINDEX)[16:20])[0])
+
+def set_promisc(s,iff,val=1):
+  mreq = struct.pack("IHH8s", get_if_index(iff), PACKET_MR_PROMISC, 0, "")
+  if val:
+      cmd = PACKET_ADD_MEMBERSHIP
+  else:
+      cmd = PACKET_DROP_MEMBERSHIP
+  s.setsockopt(SOL_PACKET, cmd, mreq)
+
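Illustrative use of set_promisc() on a raw socket; the interface name is an assumption:

    import socket
    from adapters.common.frameio.third_party.oftest import netutils

    s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, 0)
    netutils.set_promisc(s, 'eth0')     # enable promiscuous mode on eth0
    netutils.set_promisc(s, 'eth0', 0)  # disable it again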
diff --git a/adapters/common/manhole.py b/adapters/common/manhole.py
new file mode 100644
index 0000000..c00c900
--- /dev/null
+++ b/adapters/common/manhole.py
@@ -0,0 +1,129 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import rlcompleter
+from pprint import pprint
+
+import structlog
+from twisted.conch import manhole_ssh
+from twisted.conch.manhole import ColoredManhole
+from twisted.conch.ssh import keys
+from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse
+from twisted.cred.portal import Portal
+from twisted.internet import reactor
+
+log = structlog.get_logger()
+
+
+MANHOLE_SERVER_RSA_PRIVATE = './manhole_rsa_key'
+MANHOLE_SERVER_RSA_PUBLIC = './manhole_rsa_key.pub'
+
+
+def get_rsa_keys():
+    if not (os.path.exists(MANHOLE_SERVER_RSA_PUBLIC) and \
+                    os.path.exists(MANHOLE_SERVER_RSA_PRIVATE)):
+        # generate an RSA keypair
+        log.info('generate-rsa-keypair')
+        from Crypto.PublicKey import RSA
+        rsa_key = RSA.generate(1024)
+        public_key_str = rsa_key.publickey().exportKey(format='OpenSSH')
+        private_key_str = rsa_key.exportKey()
+
+        # save keys for next time
+        file(MANHOLE_SERVER_RSA_PUBLIC, 'w+b').write(public_key_str)
+        file(MANHOLE_SERVER_RSA_PRIVATE, 'w+b').write(private_key_str)
+        log.debug('saved-rsa-keypair', public=MANHOLE_SERVER_RSA_PUBLIC,
+                  private=MANHOLE_SERVER_RSA_PRIVATE)
+    else:
+        public_key_str = file(MANHOLE_SERVER_RSA_PUBLIC).read()
+        private_key_str = file(MANHOLE_SERVER_RSA_PRIVATE).read()
+    return public_key_str, private_key_str
+
+
+class ManholeWithCompleter(ColoredManhole):
+
+    def __init__(self, namespace):
+        namespace['manhole'] = self
+        super(ManholeWithCompleter, self).__init__(namespace)
+        self.last_tab = None
+        self.completer = rlcompleter.Completer(self.namespace)
+
+    def handle_TAB(self):
+        if self.last_tab != self.lineBuffer:
+            self.last_tab = self.lineBuffer
+            return
+
+        buffer = ''.join(self.lineBuffer)
+        completions = []
+        maxlen = 3
+        for c in xrange(1000):
+            candidate = self.completer.complete(buffer, c)
+            if not candidate:
+                break
+
+            if len(candidate) > maxlen:
+                maxlen = len(candidate)
+
+            completions.append(candidate)
+
+        if len(completions) == 1:
+            rest = completions[0][len(buffer):]
+            self.terminal.write(rest)
+            self.lineBufferIndex += len(rest)
+            self.lineBuffer.extend(rest)
+
+        elif len(completions):
+            maxlen += 3
+            numcols = self.width / maxlen
+            self.terminal.nextLine()
+            for idx, candidate in enumerate(completions):
+                self.terminal.write('%%-%ss' % maxlen % candidate)
+                if not ((idx + 1) % numcols):
+                    self.terminal.nextLine()
+            self.terminal.nextLine()
+            self.drawInputLine()
+
+
+class Manhole(object):
+
+    def __init__(self, port, pws, **kw):
+        kw.update(globals())
+        kw['pp'] = pprint
+
+        realm = manhole_ssh.TerminalRealm()
+        manhole = ManholeWithCompleter(kw)
+
+        def windowChanged(_, win_size):
+            manhole.terminalSize(*reversed(win_size[:2]))
+
+        realm.sessionFactory.windowChanged = windowChanged
+        realm.chainedProtocolFactory.protocolFactory = lambda _: manhole
+        portal = Portal(realm)
+        portal.registerChecker(InMemoryUsernamePasswordDatabaseDontUse(**pws))
+        factory = manhole_ssh.ConchFactory(portal)
+        public_key_str, private_key_str = get_rsa_keys()
+        factory.publicKeys = {
+            'ssh-rsa': keys.Key.fromString(public_key_str)
+        }
+        factory.privateKeys = {
+            'ssh-rsa': keys.Key.fromString(private_key_str)
+        }
+        reactor.listenTCP(port, factory, interface='localhost')
+
+
+if __name__ == '__main__':
+    Manhole(12222, dict(admin='admin'))
+    reactor.run()
diff --git a/adapters/common/openflow/__init__.py b/adapters/common/openflow/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/common/openflow/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/common/openflow/utils.py b/adapters/common/openflow/utils.py
new file mode 100644
index 0000000..b4c66cb
--- /dev/null
+++ b/adapters/common/openflow/utils.py
@@ -0,0 +1,45 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from adapters.protos import openflow_13_pb2 as ofp
+
+OUTPUT = ofp.OFPAT_OUTPUT
+ETH_TYPE = ofp.OFPXMT_OFB_ETH_TYPE
+IP_PROTO = ofp.OFPXMT_OFB_IP_PROTO
+
+def get_ofb_fields(flow):
+    assert isinstance(flow, ofp.ofp_flow_stats)
+    assert flow.match.type == ofp.OFPMT_OXM
+    ofb_fields = []
+    for field in flow.match.oxm_fields:
+        assert field.oxm_class == ofp.OFPXMC_OPENFLOW_BASIC
+        ofb_fields.append(field.ofb_field)
+    return ofb_fields
+
+def get_actions(flow):
+    """Extract list of ofp_action objects from flow spec object"""
+    assert isinstance(flow, ofp.ofp_flow_stats)
+    # we have the following hard assumptions for now
+    for instruction in flow.instructions:
+        if instruction.type == ofp.OFPIT_APPLY_ACTIONS:
+            return instruction.actions.actions
+
+def get_out_port(flow):
+    for action in get_actions(flow):
+        if action.type == OUTPUT:
+            return action.output.port
+    return None
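A hypothetical sketch of applying the helpers above to a flow entry; the attribute names used on the match field (type, eth_type) are assumptions based on the openflow_13 protobuf definitions, and the flow itself would come from the Core or a flow table elsewhere:

    from adapters.common.openflow.utils import (
        get_ofb_fields, get_actions, get_out_port, ETH_TYPE)

    def describe_flow(flow):
        # flow is an openflow_13_pb2.ofp_flow_stats message
        for field in get_ofb_fields(flow):
            if field.type == ETH_TYPE:
                print 'matches eth_type 0x%04x' % field.eth_type
        print 'apply-actions:', get_actions(flow)
        print 'output port:', get_out_port(flow)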
diff --git a/adapters/common/structlog_setup.py b/adapters/common/structlog_setup.py
new file mode 100644
index 0000000..3401977
--- /dev/null
+++ b/adapters/common/structlog_setup.py
@@ -0,0 +1,134 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Setting up proper logging for Voltha"""
+
+import logging
+import logging.config
+from collections import OrderedDict
+
+import structlog
+from structlog.stdlib import BoundLogger, INFO
+
+try:
+    from thread import get_ident as _get_ident
+except ImportError:
+    from dummy_thread import get_ident as _get_ident
+
+
+class StructuredLogRenderer(object):
+    def __call__(self, logger, name, event_dict):
+        # in order to keep the structured log data in event_dict and forward it
+        # as is, we need to pass it to the logger framework as the first
+        # positional argument.
+        args = (event_dict,)
+        kwargs = {}
+        return args, kwargs
+
+
+class PlainRenderedOrderedDict(OrderedDict):
+    """Our special version of OrderedDict that renders into string as a dict,
+       to make the log stream output cleaner.
+    """
+    def __repr__(self, _repr_running={}):
+        'od.__repr__() <==> repr(od)'
+        call_key = id(self), _get_ident()
+        if call_key in _repr_running:
+            return '...'
+        _repr_running[call_key] = 1
+        try:
+            if not self:
+                return '{}'
+            return '{%s}' % ", ".join("%s: %s" % (k, v)
+                                      for k, v in self.items())
+        finally:
+            del _repr_running[call_key]
+
+
+def setup_logging(log_config, instance_id, verbosity_adjust=0):
+    """
+    Set up logging such that:
+    - The primary logging entry method is structlog
+      (see http://structlog.readthedocs.io/en/stable/index.html)
+    - By default, the logging backend is Python standard lib logger
+    """
+
+    def add_exc_info_flag_for_exception(_, name, event_dict):
+        if name == 'exception':
+            event_dict['exc_info'] = True
+        return event_dict
+
+    def add_instance_id(_, __, event_dict):
+        event_dict['instance_id'] = instance_id
+        return event_dict
+
+    # Configure standard logging
+    logging.config.dictConfig(log_config)
+    logging.root.level -= 10 * verbosity_adjust
+
+    processors = [
+        add_exc_info_flag_for_exception,
+        structlog.processors.StackInfoRenderer(),
+        structlog.processors.format_exc_info,
+        add_instance_id,
+        StructuredLogRenderer(),
+    ]
+    structlog.configure(logger_factory=structlog.stdlib.LoggerFactory(),
+                        context_class=PlainRenderedOrderedDict,
+                        wrapper_class=BoundLogger,
+                        processors=processors)
+
+    # Mark first line of log
+    log = structlog.get_logger()
+    log.info("first-line")
+    return log
+
+
+def update_logging(instance_id, vcore_id):
+    """
+    Add the vcore id to the structured logger
+    :param instance_id: The instance id
+    :param vcore_id: The assigned vcore id
+    :return: structured logger
+    """
+    def add_exc_info_flag_for_exception(_, name, event_dict):
+        if name == 'exception':
+            event_dict['exc_info'] = True
+        return event_dict
+
+    def add_instance_id(_, __, event_dict):
+        if instance_id is not None:
+            event_dict['instance_id'] = instance_id
+        return event_dict
+
+    def add_vcore_id(_, __, event_dict):
+        if vcore_id is not None:
+            event_dict['vcore_id'] = vcore_id
+        return event_dict
+
+    processors = [
+        add_exc_info_flag_for_exception,
+        structlog.processors.StackInfoRenderer(),
+        structlog.processors.format_exc_info,
+        add_instance_id,
+        add_vcore_id,
+        StructuredLogRenderer(),
+    ]
+    structlog.configure(processors=processors)
+
+    # Mark first line of log
+    log = structlog.get_logger()
+    log.info("updated-logger")
+    return log
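For reference, a minimal sketch of calling setup_logging() with a standard dictConfig-style configuration; the configuration and instance id below are assumptions, not the ones shipped with Voltha:

    from adapters.common.structlog_setup import setup_logging

    LOG_CONFIG = {
        'version': 1,
        'formatters': {'brief': {'format': '%(message)s'}},
        'handlers': {'console': {'class': 'logging.StreamHandler',
                                 'formatter': 'brief'}},
        'root': {'handlers': ['console'], 'level': 'INFO'},
    }

    log = setup_logging(LOG_CONFIG, instance_id='core-0001', verbosity_adjust=1)
    log.info('component-started', port=50060)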
diff --git a/adapters/common/utils/__init__.py b/adapters/common/utils/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/common/utils/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/common/utils/asleep.py b/adapters/common/utils/asleep.py
new file mode 100644
index 0000000..10d1ce3
--- /dev/null
+++ b/adapters/common/utils/asleep.py
@@ -0,0 +1,31 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+""" Async sleep (asleep) method and other twisted goodies """
+
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+
+
+def asleep(dt):
+    """
+    Async (event driven) wait for given time period (in seconds)
+    :param dt: Delay in seconds
+    :return: Deferred to be fired with value None when time expires.
+    """
+    d = Deferred()
+    reactor.callLater(dt, lambda: d.callback(None))
+    return d
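A short sketch of asleep() inside a Twisted inlineCallbacks coroutine; the polling loop is illustrative only:

    from twisted.internet import reactor
    from twisted.internet.defer import inlineCallbacks
    from adapters.common.utils.asleep import asleep

    @inlineCallbacks
    def poll_forever(interval=5):
        while True:
            print 'polling...'
            yield asleep(interval)   # non-blocking wait; the reactor keeps running

    poll_forever()
    reactor.run()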
diff --git a/adapters/common/utils/consulhelpers.py b/adapters/common/utils/consulhelpers.py
new file mode 100644
index 0000000..6060ba3
--- /dev/null
+++ b/adapters/common/utils/consulhelpers.py
@@ -0,0 +1,178 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Some consul related convenience functions
+"""
+
+from structlog import get_logger
+from consul import Consul
+from random import randint
+from adapters.common.utils.nethelpers import get_my_primary_local_ipv4
+
+log = get_logger()
+
+
+def connect_to_consult(consul_endpoint):
+    log.debug('getting-service-endpoint', consul=consul_endpoint)
+
+    host = consul_endpoint.split(':')[0].strip()
+    port = int(consul_endpoint.split(':')[1].strip())
+
+    return Consul(host=host, port=port)
+
+
+def verify_all_services_healthy(consul_endpoint, service_name=None,
+                                number_of_expected_services=None):
+    """
+    Verify with consul whether the named service, or all services, are healthy
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of service to check, optional
+    :param number_of_expected_services: number of services to check for, optional
+    :return: true if healthy, false otherwise
+    """
+
+    def check_health(service):
+        _, serv_health = consul.health.service(service, passing=True)
+        return not serv_health == []
+
+    consul = connect_to_consult(consul_endpoint)
+
+    if service_name is not None:
+        return check_health(service_name)
+
+    services = get_all_services(consul_endpoint)
+
+    items = services.keys()
+
+    if number_of_expected_services is not None and \
+                    len(items) != number_of_expected_services:
+        return False
+
+    for item in items:
+        if not check_health(item):
+            return False
+
+    return True
+
+
+def get_all_services(consul_endpoint):
+    log.debug('getting-service-verify-health')
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.services()
+
+    return services
+
+
+def get_all_instances_of_service(consul_endpoint, service_name):
+    log.debug('getting-all-instances-of-service', service=service_name)
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.service(service_name)
+
+    for service in services:
+        log.debug('service',
+                  name=service['ServiceName'],
+                  serviceid=service['ServiceID'],
+                  serviceport=service['ServicePort'],
+                  createindex=service['CreateIndex'])
+
+    return services
+
+
+def get_endpoint_from_consul(consul_endpoint, service_name):
+    """
+    Get endpoint of service_name from consul.
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of service for which endpoint
+                         needs to be found.
+    :return: service endpoint if available, else exit.
+    """
+    log.debug('getting-service-info', service=service_name)
+
+    consul = connect_to_consult(consul_endpoint)
+    _, services = consul.catalog.service(service_name)
+
+    if len(services) == 0:
+        raise Exception(
+            'Cannot find service {} in consul'.format(service_name))
+
+    """ Get host IPV4 address
+    """
+    local_ipv4 = get_my_primary_local_ipv4()
+    """ If host IP address from where the request came in matches
+        the IP address of the requested service's host IP address,
+        pick the endpoint
+    """
+    for i in range(len(services)):
+        service = services[i]
+        if service['ServiceAddress'] == local_ipv4:
+            log.debug("picking address locally")
+            endpoint = '{}:{}'.format(service['ServiceAddress'],
+                                      service['ServicePort'])
+            return endpoint
+
+    """ If service is not available locally, picak a random
+        endpoint for the service from the list
+    """
+    service = services[randint(0, len(services) - 1)]
+    endpoint = '{}:{}'.format(service['ServiceAddress'],
+                              service['ServicePort'])
+
+    return endpoint
+
+
+def get_healthy_instances(consul_endpoint, service_name=None,
+                          number_of_expected_services=None):
+    """
+    Verify with consul whether the named service, or all services, are healthy
+    :param consul_endpoint: a <host>:<port> string
+    :param service_name: name of service to check, optional
+    :param number_of_expected_services: number of services to check for, optional
+    :return: true if healthy, false otherwise
+    """
+
+    def check_health(service):
+        _, serv_health = consul.health.service(service, passing=True)
+        return not serv_health == []
+
+    consul = connect_to_consult(consul_endpoint)
+
+    if service_name is not None:
+        return check_health(service_name)
+
+    services = get_all_services(consul_endpoint)
+
+    items = services.keys()
+
+    if number_of_expected_services is not None and \
+                    len(items) != number_of_expected_services:
+        return False
+
+    for item in items:
+        if not check_health(item):
+            return False
+
+    return True
+
+
+if __name__ == '__main__':
+    # print get_endpoint_from_consul('10.100.198.220:8500', 'kafka')
+    # print get_healthy_instances('10.100.198.220:8500', 'voltha-health')
+    # print get_healthy_instances('10.100.198.220:8500')
+    get_all_instances_of_service('10.100.198.220:8500', 'voltha-grpc')
diff --git a/adapters/common/utils/deferred_utils.py b/adapters/common/utils/deferred_utils.py
new file mode 100644
index 0000000..3c55c1a
--- /dev/null
+++ b/adapters/common/utils/deferred_utils.py
@@ -0,0 +1,56 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+from twisted.internet.error import AlreadyCalled
+
+
+class TimeOutError(Exception): pass
+
+
+class DeferredWithTimeout(Deferred):
+    """
+    Deferred with a timeout. If neither the callback nor the errback method
+    is called within the given time, the deferred's errback will be called
+    with a TimeOutError() exception.
+
+    All other uses are the same as of Deferred().
+    """
+    def __init__(self, timeout=1.0):
+        Deferred.__init__(self)
+        self._timeout = timeout
+        self.timer = reactor.callLater(timeout, self.timed_out)
+
+    def timed_out(self):
+        self.errback(
+            TimeOutError('timed out after {} seconds'.format(self._timeout)))
+
+    def callback(self, result):
+        self._cancel_timer()
+        return Deferred.callback(self, result)
+
+    def errback(self, fail):
+        self._cancel_timer()
+        return Deferred.errback(self, fail)
+
+    def cancel(self):
+        self._cancel_timer()
+        return Deferred.cancel(self)
+
+    def _cancel_timer(self):
+        try:
+            self.timer.cancel()
+        except AlreadyCalled:
+            pass
+
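A sketch of how DeferredWithTimeout is meant to be used, assuming a running Twisted reactor; the handler names are illustrative:

    d = DeferredWithTimeout(timeout=2.0)

    def on_result(result):
        print 'got result', result

    def on_timeout(failure):
        failure.trap(TimeOutError)
        print 'no result within 2 seconds'

    d.addCallbacks(on_result, on_timeout)
    # If d.callback(...) fires within 2 seconds the timer is cancelled;
    # otherwise on_timeout runs with a TimeOutError failure.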
diff --git a/adapters/common/utils/dockerhelpers.py b/adapters/common/utils/dockerhelpers.py
new file mode 100644
index 0000000..4620aef
--- /dev/null
+++ b/adapters/common/utils/dockerhelpers.py
@@ -0,0 +1,75 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Some docker related convenience functions
+"""
+from datetime import datetime
+from concurrent.futures import ThreadPoolExecutor
+
+import os
+import socket
+from structlog import get_logger
+
+from docker import Client, errors
+
+
+docker_socket = os.environ.get('DOCKER_SOCK', 'unix://tmp/docker.sock')
+log = get_logger()
+
+def get_my_containers_name():
+    """
+    Return the name of the docker container in which this process is running.
+    To look up the container name, we use the container ID extracted from the
+    $HOSTNAME environment variable (which is set by docker conventions).
+    :return: String with the docker container name (or None if any issue is
+             encountered)
+    """
+    my_container_id = os.environ.get('HOSTNAME', None)
+
+    try:
+        docker_cli = Client(base_url=docker_socket)
+        info = docker_cli.inspect_container(my_container_id)
+
+    except Exception, e:
+        log.exception('failed', my_container_id=my_container_id, e=e)
+        raise
+
+    name = info['Name'].lstrip('/')
+
+    return name
+
+def get_all_running_containers():
+    try:
+        docker_cli = Client(base_url=docker_socket)
+        containers = docker_cli.containers()
+
+    except Exception, e:
+        log.exception('failed', e=e)
+        raise
+
+    return containers
+
+def inspect_container(id):
+    try:
+        docker_cli = Client(base_url=docker_socket)
+        info = docker_cli.inspect_container(id)
+    except Exception, e:
+        log.exception('failed-inspect-container', id=id, e=e)
+        raise
+
+    return info
+
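A brief usage sketch, assuming the docker socket referenced by DOCKER_SOCK is reachable from this process (for example, mounted into the container):

    containers = get_all_running_containers()
    for c in containers:
        # 'Id' and 'Names' are dict fields of the pre-2.0 docker-py API used here
        print c['Id'][:12], c.get('Names')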
diff --git a/adapters/common/utils/grpc_utils.py b/adapters/common/utils/grpc_utils.py
new file mode 100644
index 0000000..8df630e
--- /dev/null
+++ b/adapters/common/utils/grpc_utils.py
@@ -0,0 +1,109 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Utilities to handle gRPC server and client side code in a Twisted environment
+"""
+import structlog
+from concurrent.futures import Future
+from twisted.internet import reactor
+from twisted.internet.defer import Deferred
+from twisted.python.threadable import isInIOThread
+
+
+log = structlog.get_logger()
+
+
+def twisted_async(func):
+    """
+    This decorator can be used to implement a gRPC method on the twisted
+    thread, allowing asynchronous programming in Twisted while serving
+    a gRPC call.
+
+    gRPC methods normally are called on the futures.ThreadPool threads,
+    so these methods cannot directly use Twisted protocol constructs.
+    If the implementation of the methods needs to touch Twisted, it is
+    safer (or mandatory) to wrap the method with this decorator, which will
+    run the inner method on the reactor (Twisted) thread and ensure that the
+    result is passed back to the calling thread.
+
+    Example usage:
+
+    When implementing a gRPC server, typical pattern is:
+
+    class SpamService(SpamServicer):
+
+        def GetBadSpam(self, request, context):
+            '''this is called from a ThreadPoolExecutor thread'''
+            # generally unsafe to make Twisted calls
+
+        @twisted_async
+        def GetSpamSafely(self, request, context):
+            '''this method is now executed on the Twisted main thread'''
+            # safe to call any Twisted protocol functions
+
+        @twisted_async
+        @inlineCallbacks
+        def GetAsyncSpam(self, request, context):
+            '''this generator can use inlineCallbacks Twisted style'''
+            result = yield some_async_twisted_call(request)
+            returnValue(result)
+
+    """
+    def in_thread_wrapper(*args, **kw):
+
+        if isInIOThread():
+
+            return func(*args, **kw)
+
+        f = Future()
+
+        def twisted_wrapper():
+            try:
+                d = func(*args, **kw)
+                if isinstance(d, Deferred):
+
+                    def _done(result):
+                        f.set_result(result)
+                        f.done()
+
+                    def _error(e):
+                        f.set_exception(e)
+                        f.done()
+
+                    d.addCallback(_done)
+                    d.addErrback(_error)
+
+                else:
+                    f.set_result(d)
+                    f.done()
+
+            except Exception, e:
+                f.set_exception(e)
+                f.done()
+
+        reactor.callFromThread(twisted_wrapper)
+        try:
+            result = f.result()
+        except Exception, e:
+            log.exception(e=e, func=func, args=args, kw=kw)
+            raise
+
+        return result
+
+    return in_thread_wrapper
+
+
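Beyond the servicer examples in the docstring, a small sketch of the decorator applied to a standalone function, assuming the reactor runs in the main thread and the call is made from a worker thread (names are illustrative):

    @twisted_async
    def fetch_value():
        d = Deferred()
        reactor.callLater(0.1, d.callback, 42)   # resolved on the reactor thread
        return d

    # From a concurrent.futures worker thread, fetch_value() blocks until the
    # Deferred fires on the reactor thread and then returns 42.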
diff --git a/adapters/common/utils/id_generation.py b/adapters/common/utils/id_generation.py
new file mode 100644
index 0000000..e0fea1c
--- /dev/null
+++ b/adapters/common/utils/id_generation.py
@@ -0,0 +1,116 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# """ ID generation utils """
+
+from uuid import uuid4
+
+
+BROADCAST_CORE_ID=hex(0xFFFF)[2:]
+
+def get_next_core_id(current_id_in_hex_str):
+    """
+    :param current_id_in_hex_str: a hex string of the maximum core id 
+    assigned without the leading 0x characters
+    :return: current_id_in_hex_str + 1 in hex string 
+    """
+    if not current_id_in_hex_str or current_id_in_hex_str == '':
+        return '0001'
+    else:
+        return format(int(current_id_in_hex_str, 16) + 1, '04x')
+
+
+def create_cluster_logical_device_ids(core_id, switch_id):
+    """
+    Creates a logical device id and an OpenFlow datapath id that is unique 
+    across the Voltha cluster.
+    The returned logical device id represents a 64-bit integer where the
+    lower 48 bits are the switch id and the upper 16 bits are the core id.
+    For the datapath id the core id is set to '0000' as it is not used for
+    voltha core routing.
+    :param core_id: string
+    :param switch_id:int
+    :return: cluster logical device id and OpenFlow datapath id
+    """
+    switch_id = format(switch_id, '012x')
+    core_in_hex=format(int(core_id, 16), '04x')
+    ld_id = '{}{}'.format(core_in_hex[-4:], switch_id[-12:])
+    dpid_id = '{}{}'.format('0000', switch_id[-12:])
+    return ld_id, int(dpid_id, 16)
+
+def is_broadcast_core_id(id):
+    assert id and len(id) == 16
+    return id[:4] == BROADCAST_CORE_ID
+
+def create_empty_broadcast_id():
+    """
+    Returns an empty broadcast id (ffff000000000000). The id is used to
+    dispatch xPON objects across all the Voltha instances.
+    :return: An empty broadcast id
+    """
+    return '{}{}'.format(BROADCAST_CORE_ID, '0'*12)
+
+def create_cluster_id():
+    """
+    Returns an id that is common across all voltha instances.  The id
+    is a str of 64 bits.  The lower 48 bits refer to an id specific to that
+    object while the upper 16 bits refer to a broadcast core_id
+    :return: A common id across all Voltha instances
+    """
+    return '{}{}'.format(BROADCAST_CORE_ID, uuid4().hex[:12])
+
+def create_cluster_device_id(core_id):
+    """
+    Creates a device id that is unique across the Voltha cluster.
+    The device id is a str of 64 bits.  The lower 48 bits refer to the
+    device id while the upper 16 bits refer to the core id.
+    :param core_id: string
+    :return: cluster device id
+    """
+    return '{}{}'.format(format(int(core_id), '04x'), uuid4().hex[:12])
+
+
+def get_core_id_from_device_id(device_id):
+    # Device id is a string and the first 4 characters represent the core_id
+    assert device_id and len(device_id) == 16
+    # Return the leading 4 hex characters (the core id)
+    return device_id[:4]
+
+
+def get_core_id_from_logical_device_id(logical_device_id):
+    """ 
+    Logical Device id is a string and the first 4 characters represent the 
+    core_id
+    :param logical_device_id: 
+    :return: core_id string
+    """
+    assert logical_device_id and len(logical_device_id) == 16
+    # Return the leading 4 hex characters (the core id)
+    return logical_device_id[:4]
+
+
+def get_core_id_from_datapath_id(datapath_id):
+    """
+    datapath id is a uint64 where:
+        - low 48 bits -> switch_id
+        - high 16 bits -> core id
+    :param datapath_id: 
+    :return: core_id string
+    """
+    assert datapath_id
+    # Get the hex string and remove the '0x' prefix
+    id_in_hex_str = hex(datapath_id)[2:]
+    assert len(id_in_hex_str) > 12
+    return id_in_hex_str[:-12]
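A worked example of the id layout described above (the values are illustrative only):

    ld_id, dpid = create_cluster_logical_device_ids('0001', 2)
    assert ld_id == '0001000000000002'    # upper 16 bits: core id, lower 48: switch id
    assert dpid == 2                      # core portion of the datapath id forced to '0000'
    assert get_core_id_from_logical_device_id(ld_id) == '0001'
    assert get_next_core_id('0001') == '0002'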
diff --git a/adapters/common/utils/indexpool.py b/adapters/common/utils/indexpool.py
new file mode 100644
index 0000000..858cb3a
--- /dev/null
+++ b/adapters/common/utils/indexpool.py
@@ -0,0 +1,64 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from bitstring import BitArray
+import structlog
+
+log = structlog.get_logger()
+
+class IndexPool(object):
+    def __init__(self, max_entries, offset):
+        self.max_entries = max_entries
+        self.offset = offset
+        self.indices = BitArray(self.max_entries)
+
+    def get_next(self):
+        try:
+            _pos = self.indices.find('0b0')
+            self.indices.set(1, _pos)
+            return self.offset + _pos[0]
+        except IndexError:
+            log.info("exception-fail-to-allocate-id-all-bits-in-use")
+            return None
+
+    def allocate(self, index):
+        try:
+            _pos = index - self.offset
+            if not (0 <= _pos < self.max_entries):
+                log.info("{}-out-of-range".format(index))
+                return None
+            if self.indices[_pos]:
+                log.info("{}-is-already-allocated".format(index))
+                return None
+            self.indices.set(1, _pos)
+            return index
+
+        except IndexError:
+            return None
+
+    def release(self, index):
+        index -= self.offset
+        _pos = (index,)
+        try:
+            self.indices.set(0, _pos)
+        except IndexError:
+            log.info("bit-position-{}-out-of-range".format(index))
+
+    # index or multiple indices to mark as allocated (set to 1); must be a tuple
+    def pre_allocate(self, index):
+        if isinstance(index, tuple):
+            _lst = list(index)
+            for i in range(len(_lst)):
+                _lst[i] -= self.offset
+            index = tuple(_lst)
+            self.indices.set(1, index)
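A short sketch of the pool in use; the size and offset are arbitrary example values:

    pool = IndexPool(max_entries=8, offset=100)
    first = pool.get_next()        # -> 100
    second = pool.get_next()       # -> 101
    pool.release(first)            # 100 becomes available again
    pool.pre_allocate((103, 104))  # mark two indices as used up front (tuple required)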
diff --git a/adapters/common/utils/json_format.py b/adapters/common/utils/json_format.py
new file mode 100644
index 0000000..c18d013
--- /dev/null
+++ b/adapters/common/utils/json_format.py
@@ -0,0 +1,105 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Monkey patched json_format to allow best effort decoding of Any fields.
+Use the additional flag (strict_any_handling=False) to trigger the
+best-effort behavior. Omit the flag, or just use the original json_format
+module for the strict behavior.
+"""
+
+from google.protobuf import json_format
+
+class _PatchedPrinter(json_format._Printer):
+
+    def __init__(self, including_default_value_fields=False,
+                 preserving_proto_field_name=False,
+                 strict_any_handling=False):
+        super(_PatchedPrinter, self).__init__(including_default_value_fields,
+                                              preserving_proto_field_name)
+        self.strict_any_handling = strict_any_handling
+
+    def _BestEffortAnyMessageToJsonObject(self, msg):
+        try:
+            res = self._AnyMessageToJsonObject(msg)
+        except TypeError:
+            res = self._RegularMessageToJsonObject(msg, {})
+        return res
+
+
+def MessageToDict(message,
+                  including_default_value_fields=False,
+                  preserving_proto_field_name=False,
+                  strict_any_handling=False):
+    """Converts protobuf message to a JSON dictionary.
+
+    Args:
+      message: The protocol buffers message instance to serialize.
+      including_default_value_fields: If True, singular primitive fields,
+          repeated fields, and map fields will always be serialized.  If
+          False, only serialize non-empty fields.  Singular message fields
+          and oneof fields are not affected by this option.
+      preserving_proto_field_name: If True, use the original proto field
+          names as defined in the .proto file. If False, convert the field
+          names to lowerCamelCase.
+      strict_any_handling: If True, conversion will error out (like in the
+          original method) if an Any field with value for which the Any type
+          is not loaded is encountered. If False, the conversion will leave
+          the field un-packed, but otherwise will continue.
+
+    Returns:
+      A dict representation of the JSON formatted protocol buffer message.
+    """
+    printer = _PatchedPrinter(including_default_value_fields,
+                              preserving_proto_field_name,
+                              strict_any_handling=strict_any_handling)
+    # pylint: disable=protected-access
+    return printer._MessageToJsonObject(message)
+
+
+def MessageToJson(message,
+                  including_default_value_fields=False,
+                  preserving_proto_field_name=False,
+                  strict_any_handling=False):
+  """Converts protobuf message to JSON format.
+
+  Args:
+    message: The protocol buffers message instance to serialize.
+    including_default_value_fields: If True, singular primitive fields,
+        repeated fields, and map fields will always be serialized.  If
+        False, only serialize non-empty fields.  Singular message fields
+        and oneof fields are not affected by this option.
+    preserving_proto_field_name: If True, use the original proto field
+        names as defined in the .proto file. If False, convert the field
+        names to lowerCamelCase.
+    strict_any_handling: If True, conversion will error out (like in the
+        original method) if an Any field with value for which the Any type
+        is not loaded is encountered. If False, the conversion will leave
+        the field un-packed, but otherwise will continue.
+
+  Returns:
+    A string containing the JSON formatted protocol buffer message.
+  """
+  printer = _PatchedPrinter(including_default_value_fields,
+                            preserving_proto_field_name,
+                            strict_any_handling=strict_any_handling)
+  return printer.ToJsonString(message)
+
+
+json_format._WKTJSONMETHODS['google.protobuf.Any'] = [
+    '_BestEffortAnyMessageToJsonObject',
+    '_ConvertAnyMessage'
+]
+
+json_format._Printer._BestEffortAnyMessageToJsonObject = \
+    json_format._Printer._AnyMessageToJsonObject
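A usage sketch of the patched module, where msg stands for any protobuf message carrying a packed google.protobuf.Any field (the variable is hypothetical, not part of this change):

    d = MessageToDict(msg, strict_any_handling=False)
    # The stock json_format errors out when the Any payload's type is not loaded;
    # this patched variant falls back to a regular, un-packed conversion instead.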
diff --git a/adapters/common/utils/message_queue.py b/adapters/common/utils/message_queue.py
new file mode 100644
index 0000000..2b4257a
--- /dev/null
+++ b/adapters/common/utils/message_queue.py
@@ -0,0 +1,89 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from twisted.internet.defer import Deferred
+from twisted.internet.defer import succeed
+
+
+class MessageQueue(object):
+    """
+    An event driven queue, similar to twisted.internet.defer.DeferredQueue
+    but which allows selective dequeuing based on a predicate function.
+    Unlike DeferredQueue, there is no limit on the backlog of pending getters
+    and no limit on the queue size.
+    """
+
+    def __init__(self):
+        self.waiting = []  # tuples of (d, predicate)
+        self.queue = []  # messages piling up here if no one is waiting
+
+    def reset(self):
+        """
+        Purge all content as well as waiters (by errback-ing their entries).
+        :return: None
+        """
+        for d, _ in self.waiting:
+            d.errback(Exception('message queue reset() was called'))
+        self.waiting = []
+        self.queue = []
+
+    def _cancelGet(self, d):
+        """
+        Remove a deferred from our waiting list.
+        :param d: The deferred that has been canceled.
+        :return: None
+        """
+        for i in range(len(self.waiting)):
+            if self.waiting[i][0] is d:
+                self.waiting.pop(i)
+
+    def put(self, obj):
+        """
+        Add an object to this queue
+        :param obj: arbitrary object that will be added to the queue
+        :return: None
+        """
+
+        # if someone is waiting for this, return right away
+        for i in range(len(self.waiting)):
+            d, predicate = self.waiting[i]
+            if predicate is None or predicate(obj):
+                self.waiting.pop(i)
+                d.callback(obj)
+                return
+
+        # otherwise...
+        self.queue.append(obj)
+
+    def get(self, predicate=None):
+        """
+        Attempt to retrieve and remove an object from the queue that
+        matches the optional predicate.
+        :return: Deferred which fires with the next object available.
+        If predicate was provided, only objects for which
+        predicate(obj) is True will be considered.
+        """
+        for i in range(len(self.queue)):
+            msg = self.queue[i]
+            if predicate is None or predicate(msg):
+                self.queue.pop(i)
+                return succeed(msg)
+
+        # there were no matching entries if we got here, so we wait
+        d = Deferred(canceller=self._cancelGet)
+        self.waiting.append((d, predicate))
+        return d
+
+
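A sketch of predicate-based dequeuing; the message shapes are illustrative:

    from twisted.internet.defer import inlineCallbacks

    q = MessageQueue()
    q.put({'type': 'stats'})
    q.put({'type': 'alarm', 'severity': 'major'})

    @inlineCallbacks
    def wait_for_alarm():
        # skips the queued 'stats' message, fires with the 'alarm' one
        msg = yield q.get(predicate=lambda m: m.get('type') == 'alarm')
        print 'received', msg

    wait_for_alarm()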
diff --git a/adapters/common/utils/nethelpers.py b/adapters/common/utils/nethelpers.py
new file mode 100644
index 0000000..b17aced
--- /dev/null
+++ b/adapters/common/utils/nethelpers.py
@@ -0,0 +1,86 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Some network related convenience functions
+"""
+
+from netifaces import AF_INET
+
+import netifaces as ni
+import netaddr
+
+
+def _get_all_interfaces():
+    m_interfaces = []
+    for iface in ni.interfaces():
+        m_interfaces.append((iface, ni.ifaddresses(iface)))
+    return m_interfaces
+
+
+def _get_my_primary_interface():
+    gateways = ni.gateways()
+    assert 'default' in gateways, \
+        ("No default gateway on host/container, "
+         "cannot determine primary interface")
+    default_gw_index = gateways['default'].keys()[0]
+    # gateways[default_gw_index] has the format (example):
+    # [('10.15.32.1', 'en0', True)]
+    interface_name = gateways[default_gw_index][0][1]
+    return interface_name
+
+
+def get_my_primary_local_ipv4(inter_core_subnet=None, ifname=None):
+    if not inter_core_subnet:
+        return _get_my_primary_local_ipv4(ifname)
+    # My IP should belong to the specified subnet
+    for iface in ni.interfaces():
+        addresses = ni.ifaddresses(iface)
+        if AF_INET in addresses:
+            m_ip = addresses[AF_INET][0]['addr']
+            _ip = netaddr.IPAddress(m_ip).value
+            m_network = netaddr.IPNetwork(inter_core_subnet)
+            if _ip >= m_network.first and _ip <= m_network.last:
+                return m_ip
+    return None
+
+
+def get_my_primary_interface(pon_subnet=None):
+    if not pon_subnet:
+        return _get_my_primary_interface()
+    # My interface should have an IP that belongs to the specified subnet
+    for iface in ni.interfaces():
+        addresses = ni.ifaddresses(iface)
+        if AF_INET in addresses:
+            m_ip = addresses[AF_INET][0]['addr']
+            m_ip = netaddr.IPAddress(m_ip).value
+            m_network = netaddr.IPNetwork(pon_subnet)
+            if m_ip >= m_network.first and m_ip <= m_network.last:
+                return iface
+    return None
+
+
+def _get_my_primary_local_ipv4(ifname=None):
+    try:
+        ifname = get_my_primary_interface() if ifname is None else ifname
+        addresses = ni.ifaddresses(ifname)
+        ipv4 = addresses[AF_INET][0]['addr']
+        return ipv4
+    except Exception as e:
+        return None
+
+if __name__ == '__main__':
+    print get_my_primary_local_ipv4()
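A brief sketch of the helpers above; the subnet is an example value only:

    print get_my_primary_interface()                    # e.g. 'eth0'
    print get_my_primary_local_ipv4()                   # address of that interface
    print get_my_primary_local_ipv4('10.100.198.0/24')  # first local address inside the subnet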
diff --git a/adapters/common/utils/ordered_weakvalue_dict.py b/adapters/common/utils/ordered_weakvalue_dict.py
new file mode 100644
index 0000000..9ea739a
--- /dev/null
+++ b/adapters/common/utils/ordered_weakvalue_dict.py
@@ -0,0 +1,48 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from _weakref import ref
+from weakref import KeyedRef
+from collections import OrderedDict
+
+
+class OrderedWeakValueDict(OrderedDict):
+    """
+    Modified OrderedDict to use weak references as values. Entries disappear
+    automatically if the referred value has no more strong reference pointing
+    to it.
+
+    Warning, this is not a complete implementation, only what is needed for
+    now. See test_ordered_weakvalue_dict.py for the tested behavior.
+    """
+    def __init__(self, *args, **kw):
+        def remove(wr, selfref=ref(self)):
+            self = selfref()
+            if self is not None:
+                super(OrderedWeakValueDict, self).__delitem__(wr.key)
+        self._remove = remove
+        super(OrderedWeakValueDict, self).__init__(*args, **kw)
+
+    def __setitem__(self, key, value):
+        super(OrderedWeakValueDict, self).__setitem__(
+            key, KeyedRef(value, self._remove, key))
+
+    def __getitem__(self, key):
+        o = super(OrderedWeakValueDict, self).__getitem__(key)()
+        if o is None:
+            raise KeyError, key
+        else:
+            return o
+
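A sketch of the weak-value behavior, assuming CPython's immediate refcount-based collection; the class is illustrative:

    class Obj(object):
        pass

    d = OrderedWeakValueDict()
    o = Obj()
    d['a'] = o
    assert d['a'] is o
    del o    # last strong reference gone; the weakref callback removes 'a'
    # d['a'] now raises KeyError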
diff --git a/adapters/common/utils/registry.py b/adapters/common/utils/registry.py
new file mode 100644
index 0000000..270bd71
--- /dev/null
+++ b/adapters/common/utils/registry.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Simple component registry to provide centralized access to any registered
+components.
+"""
+from collections import OrderedDict
+from zope.interface import Interface
+
+
+class IComponent(Interface):
+    """
+    A Voltha Component
+    """
+
+    def start():
+        """
+        Called once the component is instantiated. Can be used for async
+        initialization.
+        :return: (None or Deferred)
+        """
+
+    def stop():
+        """
+        Called once before the component is unloaded. Can be used for async
+        cleanup operations.
+        :return: (None or Deferred)
+        """
+
+
+class Registry(object):
+
+    def __init__(self):
+        self.components = OrderedDict()
+
+    def register(self, name, component):
+        assert IComponent.providedBy(component)
+        assert name not in self.components
+        self.components[name] = component
+        return component
+
+    def unregister(self, name):
+        if name in self.components:
+            del self.components[name]
+
+    def __call__(self, name):
+        return self.components[name]
+
+    def iterate(self):
+        return self.components.values()
+
+
+# public shared registry
+registry = Registry()
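A sketch of registering and looking up a component through the shared registry above; the component class is illustrative:

    from zope.interface import implementer

    @implementer(IComponent)
    class ExampleComponent(object):
        def start(self):
            pass
        def stop(self):
            pass

    registry.register('example', ExampleComponent())
    comp = registry('example')    # lookup via Registry.__call__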
diff --git a/adapters/docker/Dockerfile.base b/adapters/docker/Dockerfile.base
new file mode 100644
index 0000000..a50a3ee
--- /dev/null
+++ b/adapters/docker/Dockerfile.base
@@ -0,0 +1,34 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+FROM ubuntu:xenial
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Update to have latest images
+RUN apt-get update && \
+    apt-get install -y python python-pip openssl iproute2 libpcap-dev wget
+
+COPY adapters/requirements.txt /tmp/requirements.txt
+
+# pip install cython enum34 six && \
+# Install app dependencies
+RUN wget https://github.com/Yelp/dumb-init/releases/download/v1.2.0/dumb-init_1.2.0_amd64.deb && \
+    dpkg -i *.deb && \
+    rm -f *.deb && \
+    apt-get update && \
+    apt-get install -y wget build-essential make gcc binutils python-dev libffi-dev libssl-dev git && \
+    pip install -r /tmp/requirements.txt && \
+    apt-get purge -y wget build-essential make gcc binutils python-dev libffi-dev libssl-dev git && \
+    apt-get autoremove -y
diff --git a/adapters/docker/Dockerfile.ponsim_adapter_olt b/adapters/docker/Dockerfile.ponsim_adapter_olt
new file mode 100644
index 0000000..5b714f1
--- /dev/null
+++ b/adapters/docker/Dockerfile.ponsim_adapter_olt
@@ -0,0 +1,42 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ARG TAG=latest
+ARG REGISTRY=
+ARG REPOSITORY=
+
+FROM ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} as protos
+FROM ${REGISTRY}${REPOSITORY}voltha-base:${TAG}
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Bundle app source
+RUN mkdir /adapters && touch /adapters/__init__.py
+ENV PYTHONPATH=/adapters
+COPY adapters/common /adapters/adapters/common
+COPY adapters/kafka /adapters/adapters/kafka
+COPY adapters/*.py /adapters/adapters/
+#COPY pki /voltha/pki
+COPY adapters/ponsim_olt /adapters/adapters/ponsim_olt
+RUN touch /adapters/adapters/__init__.py
+
+
+# Copy in the generated GRPC proto code
+COPY --from=protos /protos/voltha /adapters/adapters/protos
+COPY --from=protos /protos/google/api /adapters/adapters/protos/third_party/google/api
+COPY adapters/protos/third_party/__init__.py /adapters/adapters/protos/third_party
+RUN touch /adapters/adapters/protos/__init__.py
+RUN touch /adapters/adapters/protos/third_party/google/__init__.py
+
+# Exposing process and default entry point
+# CMD ["python", "/adapters/ponsim_olt/main.py"]
diff --git a/adapters/docker/Dockerfile.ponsim_adapter_onu b/adapters/docker/Dockerfile.ponsim_adapter_onu
new file mode 100644
index 0000000..57cc113
--- /dev/null
+++ b/adapters/docker/Dockerfile.ponsim_adapter_onu
@@ -0,0 +1,42 @@
+# Copyright 2016 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ARG TAG=latest
+ARG REGISTRY=
+ARG REPOSITORY=
+
+FROM ${REGISTRY}${REPOSITORY}voltha-protos:${TAG} as protos
+FROM ${REGISTRY}${REPOSITORY}voltha-base:${TAG}
+
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+# Bundle app source
+RUN mkdir /adapters && touch /adapters/__init__.py
+ENV PYTHONPATH=/adapters
+COPY adapters/common /adapters/adapters/common
+COPY adapters/kafka /adapters/adapters/kafka
+COPY adapters/*.py /adapters/adapters/
+#COPY pki /voltha/pki
+COPY adapters/ponsim_onu /adapters/adapters/ponsim_onu
+RUN touch /adapters/adapters/__init__.py
+
+
+# Copy in the generated GRPC proto code
+COPY --from=protos /protos/voltha /adapters/adapters/protos
+COPY --from=protos /protos/google/api /adapters/adapters/protos/third_party/google/api
+COPY adapters/protos/third_party/__init__.py /adapters/adapters/protos/third_party
+RUN touch /adapters/adapters/protos/__init__.py
+RUN touch /adapters/adapters/protos/third_party/google/__init__.py
+
+# Exposing process and default entry point
+# CMD ["python", "/adapters/ponsim_onu/main.py"]
diff --git a/adapters/docker/Dockerfile.protoc b/adapters/docker/Dockerfile.protoc
new file mode 100644
index 0000000..eef6f54
--- /dev/null
+++ b/adapters/docker/Dockerfile.protoc
@@ -0,0 +1,39 @@
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG REGISTRY=
+ARG PROTOC_PREFIX=/usr/local
+ARG PROTOC_LIBDIR=${PROTOC_PREFIX}/lib
+ARG PROTOC=${PROTOC_PREFIX}/bin/protoc
+ARG PROTOC_VERSION=3.3.0
+
+FROM ${REGISTRY}debian:stretch-slim
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+ENV PROTOC_PREFIX=/usr/local
+ENV PROTOC_LIBDIR=${PROTOC_PREFIX}/lib
+ENV PROTOC=${PROTOC_PREFIX}/bin/protoc
+ENV PROTOC_VERSION=3.3.0
+ENV PROTOC_DOWNLOAD_PREFIX=https://github.com/google/protobuf/releases/download
+ENV PROTOC_DIR=protobuf-${PROTOC_VERSION}
+ENV PROTOC_TARBALL=protobuf-python-${PROTOC_VERSION}.tar.gz
+ENV PROTOC_DOWNLOAD_URI=${PROTOC_DOWNLOAD_PREFIX}/v${PROTOC_VERSION}/${PROTOC_TARBALL}
+
+RUN apt-get update -y && apt-get install -y wget build-essential python-dev python-pip
+RUN pip install grpcio-tools==1.3.5
+WORKDIR /build
+RUN wget -q --no-check-certificate ${PROTOC_DOWNLOAD_URI}
+RUN tar --strip-components=1 -zxf ${PROTOC_TARBALL}
+RUN ./configure --prefix=${PROTOC_PREFIX}
+RUN make install
diff --git a/adapters/docker/Dockerfile.protos b/adapters/docker/Dockerfile.protos
new file mode 100644
index 0000000..27e3db4
--- /dev/null
+++ b/adapters/docker/Dockerfile.protos
@@ -0,0 +1,35 @@
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ARG REGISTRY=
+ARG REPOSITORY=
+ARG TAG=latest
+
+FROM ${REGISTRY}${REPOSITORY}voltha-protoc:${TAG} as builder
+MAINTAINER Voltha Community <info@opennetworking.org>
+
+COPY adapters/protos/third_party/google/api/*.proto /protos/google/api/
+COPY adapters/docker/config/Makefile.protos /protos/google/api/Makefile.protos
+WORKDIR /protos
+RUN make -f google/api/Makefile.protos google_api
+RUN touch /protos/google/__init__.py /protos/google/api/__init__.py
+
+COPY protos/*.proto /protos/voltha/
+COPY adapters/docker/config/Makefile.protos /protos/voltha/Makefile.protos
+WORKDIR /protos/voltha
+RUN make -f Makefile.protos build
+
+# Copy the files to a scratch-based container to minimize its size
+FROM ${REGISTRY}scratch
+COPY --from=builder /protos/ /protos/
diff --git a/adapters/docker/config/Makefile.protos b/adapters/docker/config/Makefile.protos
new file mode 100644
index 0000000..12ff9e3
--- /dev/null
+++ b/adapters/docker/config/Makefile.protos
@@ -0,0 +1,59 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Makefile to build all protobuf and gRPC related artifacts
+
+default: build
+
+PROTO_FILES := $(wildcard *.proto)
+PROTO_ALL_FILES := $(PROTO_FILES) $(PROTO_GOOGLE_API)
+PROTO_PB2_FILES := $(foreach f,$(PROTO_FILES),$(subst .proto,_pb2.py,$(f)))
+PROTO_PB2_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,_pb2.py,$(f)))
+PROTO_All_PB2_C_FILES := $(foreach f,$(PROTO_ALL_FILES),$(subst .proto,_pb2.pyc,$(f)))
+PROTO_ALL_PB2_GPRC_FILES := $(foreach f,$(PROTO_ALL_FILES),$(subst .proto,_pb2_grpc.py,$(f)))
+PROTO_ALL_DESC_FILES := $(foreach f,$(PROTO_ALL_FILES),$(subst .proto,.desc,$(f)))
+
+# Google API needs to be built from within the third party directory
+#
+google_api:
+	python -m grpc.tools.protoc \
+	    -I. \
+            --python_out=. \
+            --grpc_python_out=. \
+            --descriptor_set_out=google/api/annotations.desc \
+            --include_imports \
+            --include_source_info \
+        google/api/annotations.proto google/api/http.proto
+
+build: $(PROTO_PB2_FILES)
+
+%_pb2.py: %.proto
+	python -m grpc.tools.protoc \
+                -I. \
+                -I/protos \
+                --python_out=. \
+                --grpc_python_out=. \
+                --descriptor_set_out=$(basename $<).desc \
+                --include_imports \
+                --include_source_info \
+                $<
+
+clean:
+	rm -f $(PROTO_PB2_FILES) \
+		$(PROTO_ALL_DESC_FILES) \
+		$(PROTO_ALL_PB2_GPRC_FILES) \
+		$(PROTO_All_PB2_C_FILES) \
+		$(PROTO_PB2_GOOGLE_API)
diff --git a/adapters/iadapter.py b/adapters/iadapter.py
new file mode 100644
index 0000000..c6ea3ca
--- /dev/null
+++ b/adapters/iadapter.py
@@ -0,0 +1,301 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Adapter abstract base class
+"""
+
+import structlog
+from zope.interface import implementer
+from twisted.internet import reactor
+
+from adapters.protos.common_pb2 import AdminState
+from adapters.protos.device_pb2 import DeviceType, DeviceTypes
+from adapters.interface import IAdapterInterface
+from adapters.protos.adapter_pb2 import Adapter
+from adapters.protos.adapter_pb2 import AdapterConfig
+from adapters.protos.common_pb2 import LogLevel
+from adapters.protos.health_pb2 import HealthStatus
+from adapters.protos.device_pb2 import Device
+
+log = structlog.get_logger()
+
+
+@implementer(IAdapterInterface)
+class IAdapter(object):
+    def __init__(self, adapter_agent, config, device_handler_class, name,
+                 vendor, version, device_type, vendor_id,
+                 accepts_bulk_flow_update=True,
+                 accepts_add_remove_flow_updates=False, core_proxy=None):
+        log.debug('Initializing adapter: {} {} {}'.format(vendor, name, version))
+        self.adapter_agent = adapter_agent
+        self.core_proxy=core_proxy
+        self.config = config
+        self.name = name
+        self.supported_device_types = [
+            DeviceType(
+                id=device_type,
+                vendor_id=vendor_id,
+                adapter=name,
+                accepts_bulk_flow_update=accepts_bulk_flow_update,
+                accepts_add_remove_flow_updates=accepts_add_remove_flow_updates
+            )
+        ]
+        self.descriptor = Adapter(
+            id=self.name,
+            vendor=vendor,
+            version=version,
+            config=AdapterConfig(log_level=LogLevel.INFO)
+        )
+        self.devices_handlers = dict()  # device_id -> Olt/OnuHandler()
+        self.device_handler_class = device_handler_class
+
+    def start(self):
+        log.info('Starting adapter: {}'.format(self.name))
+
+    def stop(self):
+        log.info('Stopping adapter: {}'.format(self.name))
+
+    def adapter_descriptor(self):
+        return self.descriptor
+
+    def device_types(self):
+        return DeviceTypes(items=self.supported_device_types)
+
+    def health(self):
+        # return HealthStatus(state=HealthStatus.HealthState.HEALTHY)
+        return HealthStatus(state=HealthStatus.HEALTHY)
+
+    def change_master_state(self, master):
+        raise NotImplementedError()
+
+    def get_ofp_device_info(self, device):
+        log.debug('get_ofp_device_info', device_id=device.id)
+        return self.devices_handlers[device.id].get_ofp_device_info(device)
+
+    def get_ofp_port_info(self, device, port_no):
+        log.debug('get_ofp_port_info', device_id=device.id, port_no=port_no)
+        return self.devices_handlers[device.id].get_ofp_port_info(device, port_no)
+
+    def adopt_device(self, device):
+        log.debug('adopt_device', device_id=device.id)
+        self.devices_handlers[device.id] = self.device_handler_class(self, device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].activate, device)
+        log.debug('adopt_device_done', device_id=device.id)
+        return device
+
+    def reconcile_device(self, device):
+        raise NotImplementedError()
+
+    def abandon_device(self, device):
+        raise NotImplementedError()
+
+    def disable_device(self, device):
+        log.info('disable-device', device_id=device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].disable)
+        return device
+
+    def reenable_device(self, device):
+        log.info('reenable-device', device_id=device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].reenable)
+        return device
+
+    def reboot_device(self, device):
+        log.info('reboot-device', device_id=device.id)
+        reactor.callLater(0, self.devices_handlers[device.id].reboot)
+        return device
+
+    def download_image(self, device, request):
+        raise NotImplementedError()
+
+    def get_image_download_status(self, device, request):
+        raise NotImplementedError()
+
+    def cancel_image_download(self, device, request):
+        raise NotImplementedError()
+
+    def activate_image_update(self, device, request):
+        raise NotImplementedError()
+
+    def revert_image_update(self, device, request):
+        raise NotImplementedError()
+
+    def self_test_device(self, device):
+        log.info('self-test-req', device_id=device.id)
+        result = reactor.callLater(0, self.devices_handlers[device.id].self_test_device)
+        return result
+
+    def delete_device(self, device):
+        log.info('delete-device', device_id=device.id)
+        #  TODO: Update the logical device mapping
+        reactor.callLater(0, self.devices_handlers[device.id].delete)
+        return device
+
+    def get_device_details(self, device):
+        raise NotImplementedError()
+
+    def update_flows_bulk(self, device, flows, groups):
+        log.info('bulk-flow-update', device_id=device.id,
+                 flows=flows, groups=groups)
+        assert len(groups.items) == 0
+        handler = self.devices_handlers[device.id]
+        return handler.update_flow_table(flows.items)
+
+    def update_flows_incrementally(self, device, flow_changes, group_changes):
+        log.info('incremental-flow-update', device_id=device.id,
+                 flows=flow_changes, groups=group_changes)
+        # For now, there is no support for group changes
+        assert len(group_changes.to_add.items) == 0
+        assert len(group_changes.to_remove.items) == 0
+
+        handler = self.devices_handlers[device.id]
+        # Remove flows
+        if len(flow_changes.to_remove.items) != 0:
+            handler.remove_from_flow_table(flow_changes.to_remove.items)
+
+        # Add flows
+        if len(flow_changes.to_add.items) != 0:
+            handler.add_to_flow_table(flow_changes.to_add.items)
+
+    def update_pm_config(self, device, pm_config):
+        log.info("adapter-update-pm-config", device=device,
+                 pm_config=pm_config)
+        handler = self.devices_handlers[device.id]
+        handler.update_pm_config(device, pm_config)
+
+    def send_proxied_message(self, proxy_address, msg):
+        raise NotImplementedError()
+
+    def receive_proxied_message(self, proxy_address, msg):
+        raise NotImplementedError()
+
+    def receive_packet_out(self, logical_device_id, egress_port_no, msg):
+        raise NotImplementedError()
+
+    def receive_inter_adapter_message(self, msg):
+        raise NotImplementedError()
+
+    def suppress_alarm(self, filter):
+        raise NotImplementedError()
+
+    def unsuppress_alarm(self, filter):
+        raise NotImplementedError()
+
+    def _get_handler(self, device):
+        if device.id in self.devices_handlers:
+            handler = self.devices_handlers[device.id]
+            if handler is not None:
+                return handler
+            return None
+
+"""
+OLT Adapter base class
+"""
+class OltAdapter(IAdapter):
+    def __init__(self, adapter_agent, config, device_handler_class, name,
+                 vendor, version, device_type,
+                 accepts_bulk_flow_update=True,
+                 accepts_add_remove_flow_updates=False,
+                 core_proxy=None):
+        super(OltAdapter, self).__init__(adapter_agent=adapter_agent,
+                                         config=config,
+                                         device_handler_class=device_handler_class,
+                                         name=name,
+                                         vendor=vendor,
+                                         version=version,
+                                         device_type=device_type,
+                                         vendor_id=None,
+                                         accepts_bulk_flow_update=accepts_bulk_flow_update,
+                                         accepts_add_remove_flow_updates=accepts_add_remove_flow_updates,
+                                         core_proxy=core_proxy)
+        self.logical_device_id_to_root_device_id = dict()
+
+    def reconcile_device(self, device):
+        try:
+            self.devices_handlers[device.id] = self.device_handler_class(self, device.id)
+            # Work only required for devices that are in ENABLED state
+            if device.admin_state == AdminState.ENABLED:
+                reactor.callLater(0,
+                                  self.devices_handlers[device.id].reconcile,
+                                  device)
+            else:
+                # Invoke the children reconciliation which would setup the
+                # basic children data structures
+                self.adapter_agent.reconcile_child_devices(device.id)
+            return device
+        except Exception, e:
+            log.exception('Exception', e=e)
+
+    def send_proxied_message(self, proxy_address, msg):
+        log.info('send-proxied-message', proxy_address=proxy_address, msg=msg)
+        handler = self.devices_handlers[proxy_address.device_id]
+        handler.send_proxied_message(proxy_address, msg)
+
+    def receive_packet_out(self, logical_device_id, egress_port_no, msg):
+        def ldi_to_di(ldi):
+            di = self.logical_device_id_to_root_device_id.get(ldi)
+            if di is None:
+                logical_device = self.adapter_agent.get_logical_device(ldi)
+                di = logical_device.root_device_id
+                self.logical_device_id_to_root_device_id[ldi] = di
+            return di
+
+        device_id = ldi_to_di(logical_device_id)
+        handler = self.devices_handlers[device_id]
+        handler.packet_out(egress_port_no, msg)
+
+
+"""
+ONU Adapter base class
+"""
+
+
+class OnuAdapter(IAdapter):
+    def __init__(self, adapter_agent, config, device_handler_class, name,
+                 vendor, version, device_type, vendor_id, accepts_bulk_flow_update=True,
+                 accepts_add_remove_flow_updates=False):
+        super(OnuAdapter, self).__init__(adapter_agent=adapter_agent,
+                                         config=config,
+                                         device_handler_class=device_handler_class,
+                                         name=name,
+                                         vendor=vendor,
+                                         version=version,
+                                         device_type=device_type,
+                                         vendor_id=vendor_id,
+                                         accepts_bulk_flow_update=accepts_bulk_flow_update,
+                                         accepts_add_remove_flow_updates=accepts_add_remove_flow_updates,
+                                         core_proxy=None
+                                         )
+
+    def reconcile_device(self, device):
+        self.devices_handlers[device.id] = self.device_handler_class(self, device.id)
+        # Reconcile only if state was ENABLED
+        if device.admin_state == AdminState.ENABLED:
+            reactor.callLater(0,
+                              self.devices_handlers[device.id].reconcile,
+                              device)
+        return device
+
+    def receive_proxied_message(self, proxy_address, msg):
+        log.info('receive-proxied-message', proxy_address=proxy_address,
+                 device_id=proxy_address.device_id, msg=msg)
+        # Device_id from the proxy_address is the olt device id. We need to
+        # get the onu device id using the port number in the proxy_address
+        device = self.adapter_agent. \
+            get_child_device_with_proxy_address(proxy_address)
+        if device:
+            handler = self.devices_handlers[device.id]
+            handler.receive_message(msg)
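A minimal sketch of a concrete adapter built on the base classes above; the handler class and identifiers are illustrative, not part of this change:

    class ExampleOnuHandler(object):
        def __init__(self, adapter, device_id):
            self.adapter = adapter
            self.device_id = device_id

        def activate(self, device):
            pass   # device bring-up would go here

    class ExampleOnuAdapter(OnuAdapter):
        def __init__(self, adapter_agent, config):
            super(ExampleOnuAdapter, self).__init__(
                adapter_agent=adapter_agent,
                config=config,
                device_handler_class=ExampleOnuHandler,
                name='example_onu',
                vendor='Example Vendor',
                version='0.1',
                device_type='example_onu',
                vendor_id='EXMP')

    # adopt_device() on the base class instantiates ExampleOnuHandler for the
    # device and schedules its activate() on the reactor.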
diff --git a/adapters/interface.py b/adapters/interface.py
new file mode 100644
index 0000000..860c8ba
--- /dev/null
+++ b/adapters/interface.py
@@ -0,0 +1,770 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Interface definition for Voltha Adapters
+"""
+from zope.interface import Interface
+
+
+class IAdapterInterface(Interface):
+    """
+    A Voltha adapter.  This interface is used by the Voltha Core to initiate
+    requests towards a voltha adapter.
+    """
+
+    def adapter_descriptor():
+        """
+        Return the adapter descriptor object for this adapter.
+        :return: voltha.Adapter grpc object (see voltha/protos/adapter.proto),
+        with adapter-specific information and config extensions.
+        """
+
+    def device_types():
+        """
+        Return list of device types supported by the adapter.
+        :return: voltha.DeviceTypes protobuf object, with optional type
+        specific extensions.
+        """
+
+    def health():
+        """
+        Return a 3-state health status using the voltha.HealthStatus message.
+        :return: Deferred or direct return with voltha.HealthStatus message
+        """
+
+    def adopt_device(device):
+        """
+        Make sure the adapter looks after the given device. Called when a device
+        is provisioned top-down and needs to be activated by the adapter.
+        :param device: A voltha.Device object, with possible device-type
+        specific extensions. Such extensions shall be described as part of
+        the device type specification returned by device_types().
+        :return: (Deferred) Shall be fired to acknowledge device ownership.
+        """
+
+    def reconcile_device(device):
+        """
+        Make sure the adapter looks after the given device. Called when this
+        device has changed ownership from another Voltha instance to
+        this one (typically, this occurs when the previous voltha
+        instance went down).
+        :param device: A voltha.Device object, with possible device-type
+        specific extensions. Such extensions shall be described as part of
+        the device type specification returned by device_types().
+        :return: (Deferred) Shall be fired to acknowledge device ownership.
+        """
+
+    def abandon_device(device):
+        """
+        Make sure the adapter no longer looks after the device. This is called
+        if device ownership is taken over by another Voltha instance.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge abandonment.
+        """
+
+    def disable_device(device):
+        """
+        This is called when a previously enabled device needs to be disabled
+        based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge disabling the device.
+        """
+
+    def reenable_device(device):
+        """
+        This is called when a previously disabled device needs to be enabled
+        based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge re-enabling the
+        device.
+        """
+
+    def reboot_device(device):
+        """
+        This is called to reboot a device based on an NBI call.  The admin
+        state of the device will not change after the reboot
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge the reboot.
+        """
+
+    def download_image(device, request):
+        """
+        This is called to request downloading a specified image into
+        the standby partition of a device based on an NBI call.
+        This call is expected to be non-blocking.
+        :param device: A Voltha.Device object.
+                       A Voltha.ImageDownload object.
+        :return: (Deferred) Shall be fired to acknowledge the download.
+        """
+
+    def get_image_download_status(device, request):
+        """
+        This is called to inquire about a requested image download
+        status based on an NBI call.
+        The adapter is expected to update the DownloadImage DB object
+        with the query result
+        :param device: A Voltha.Device object.
+                       A Voltha.ImageDownload object.
+        :return: (Deferred) Shall be fired to acknowledge
+        """
+
+    def cancel_image_download(device, request):
+        """
+        This is called to cancel a requested image download
+        based on an NBI call.  The admin state of the device will not
+        change after the download.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) Shall be fired to acknowledge the cancellation.
+        """
+
+    def activate_image_update(device, request):
+        """
+        This is called to activate a downloaded image from
+        a standby partition into active partition.
+        Depending on the device implementation, this call
+        may or may not cause device reboot.
+        If it does not, a separate reboot is required before the
+        activated image runs on the device.
+        This call is expected to be non-blocking.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) OperationResponse object.
+        """
+
+    def revert_image_update(device, request):
+        """
+        This is called to deactivate the specified image at
+        active partition, and revert to previous image at
+        standby partition.
+        Depending on the device implementation, this call
+        may or may not cause device reboot.
+        If it does not, a separate reboot is required before the
+        previous image runs on the device.
+        This call is expected to be non-blocking.
+        :param device: A Voltha.Device object.
+        :param request: A Voltha.ImageDownload object.
+        :return: (Deferred) OperationResponse object.
+        """
+
+    def self_test_device(device):
+        """
+        This is called to self-test a device based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: Will return the result of the self test
+        """
+
+    def delete_device(device):
+        """
+        This is called to delete a device from the PON based on an NBI call.
+        If the device is an OLT then the whole PON will be deleted.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge the deletion.
+        """
+
+    def get_device_details(device):
+        """
+        This is called to get additional device details based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: (Deferred) Shall be fired to acknowledge the retrieval of
+        additional details.
+        """
+
+    def update_flows_bulk(device, flows, groups):
+        """
+        Called after any flow table change, but only if the device supports
+        bulk mode, which is expressed by the 'accepts_bulk_flow_update'
+        capability attribute of the device type.
+        :param device: A Voltha.Device object.
+        :param flows: An openflow_v13.Flows object
+        :param groups: An openflow_v13.FlowGroups object
+        :return: (Deferred or None)
+        """
+
+    def update_flows_incrementally(device, flow_changes, group_changes):
+        """
+        Called after a flow table update, but only if the device supports
+        non-bulk mode, which is expressed by the 'accepts_add_remove_flow_updates'
+        capability attribute of the device type.
+        :param device: A Voltha.Device object.
+        :param flow_changes: An openflow_v13.FlowChanges object
+        :param group_changes: An openflow_v13.FlowGroupChanges object
+        :return: (Deferred or None)
+        """
+
+    def update_pm_config(device, pm_configs):
+        """
+        Called every time a request is made to change pm collection behavior
+        :param device: A Voltha.Device object
+        :param pm_configs: A PmConfigs object
+        """
+
+    def receive_packet_out(device_id, egress_port_no, msg):
+        """
+        Pass a packet_out message content to adapter so that it can forward
+        it out to the device. This is only called on root devices.
+        :param device_id: device ID
+        :param egress_port_no: egress logical port number
+        :param msg: actual message
+        :return: None
+        """
+
+    def suppress_alarm(filter):
+        """
+        Inform an adapter that all incoming alarms should be suppressed
+        :param filter: A Voltha.AlarmFilter object.
+        :return: (Deferred) Shall be fired to acknowledge the suppression.
+        """
+
+    def unsuppress_alarm(filter):
+        """
+        Inform an adapter that incoming alarms should no longer be suppressed
+        :param filter: A Voltha.AlarmFilter object.
+        :return: (Deferred) Shall be fired to acknowledge the unsuppression.
+        """
+
+    def get_ofp_device_info(device):
+        """
+        Retrieve the OLT device info. This includes the ofp_desc and
+        ofp_switch_features. The existing ofp structures can be used, the
+        attributes can be added to the Device definition, or a new proto
+        definition can be created. This API allows the Core to create a
+        LogicalDevice associated with this device (OLT only).
+        :param device: device
+        :return: Proto Message (TBD)
+        """
+
+    def get_ofp_port_info(device, port_no):
+        """
+        Retrieve the port info. This includes the ofp_port. The existing ofp
+        structure can be used, the attributes can be added to the Port
+        definition, or a new proto definition can be created. This API allows
+        the Core to create a LogicalPort associated with this device.
+        :param device: device
+        :param port_no: port number
+        :return: Proto Message (TBD)
+        """
+
+    # def start():
+    #     """
+    #     Called once after the adapter instance is loaded. Can be used for
+    #     async initialization.
+    #     :return: (None or Deferred)
+    #     """
+    #
+    # def stop():
+    #     """
+    #     Called once before adapter is unloaded. It can be used to perform
+    #     any cleanup after the adapter.
+    #     :return: (None or Deferred)
+    #     """
+    #
+    # def receive_inter_adapter_message(msg):
+    #     """
+    #     Called when the adapter receives a message that was sent to it directly
+    #     from another adapter. An adapter may register for these messages by calling
+    #     the register_for_inter_adapter_messages() method in the adapter agent.
+    #     Note that it is the responsibility of the sending and receiving
+    #     adapters to properly encode and decode the message.
+    #     :param msg: The message contents.
+    #     :return: None
+    #     """
+    #
+    # def send_proxied_message(proxy_address, msg):
+    #     """
+    #     Forward a msg to a child device of device, addressed by the given
+    #     proxy_address=Device.ProxyAddress().
+    #     :param proxy_address: Address info for the parent device
+    #      to route the message to the child device. This was given to the
+    #      child device by the parent device at the creation of the child
+    #      device.
+    #     :param msg: (str) The actual message to send.
+    #     :return: (Deferred(None) or None) The return of this method should
+    #      indicate that the message was successfully *sent*.
+    #     """
+    #
+    # def receive_proxied_message(proxy_address, msg):
+    #     """
+    #     Pass an async message (arrived via a proxy) to this device.
+    #     :param proxy_address: Address info for the parent device
+    #      to route the message to the child device. This was given to the
+    #      child device by the parent device at the creation of the child
+    #      device. Note this is the proxy_address with which the adapter
+    #      had to register prior to receiving proxied messages.
+    #     :param msg: (str) The actual message received.
+    #     :return: None
+    #     """
+    #
+    # def receive_packet_out(logical_device_id, egress_port_no, msg):
+    #     """
+    #     Pass a packet_out message content to adapter so that it can forward it
+    #     out to the device. This is only called on root devices.
+    #     :param logical_device_id:
+    #     :param egress_port: egress logical port number
+    #     :param msg: actual message
+    #     :return: None
+    #     """
+    #
+    # def change_master_state(master):
+    #     """
+    #     Called to indicate if plugin shall assume or lose master role. The
+    #     master role can be used to perform functions that must be performed
+    #     from a single point in the cluster. In single-node deployments of
+    #     Voltha, the plugins are always in master role.
+    #     :param master: (bool) True to indicate the mastership needs to be
+    #      assumed; False to indicate that mastership needs to be abandoned.
+    #     :return: (Deferred) which is fired by the adapter when mastership is
+    #      assumed/dropped, respectively.
+    #     """
+
+
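+# The sketch below is illustrative only and is not part of this interface
+# module: a minimal, hypothetical adapter showing how an implementation
+# might declare IAdapterInterface. All names here (MyOltAdapter, _activate)
+# are placeholders, not actual adapter code from this commit.
+#
+# from twisted.internet import reactor
+# from zope.interface import implementer
+#
+# @implementer(IAdapterInterface)
+# class MyOltAdapter(object):
+#     def __init__(self, core_proxy, config):
+#         self.core_proxy = core_proxy
+#         self.config = config
+#
+#     def adopt_device(self, device):
+#         # schedule device activation asynchronously and acknowledge
+#         # ownership, per the adopt_device contract above
+#         reactor.callLater(0, self._activate, device)
+#         return device
+#
+#     def _activate(self, device):
+#         pass  # device-specific bring-up would go here
+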
+# class IAdapterAgent(Interface):
+#     """
+#     This object is passed in to the __init__ function of each adapter,
+#     and can be used by the adapter implementation to initiate async calls
+#     toward Voltha's CORE via the APIs defined here.
+#     """
+#
+#     def get_device(device_id):
+#         # TODO add doc
+#         """"""
+#
+#     def add_device(device):
+#         # TODO add doc
+#         """"""
+#
+#     def update_device(device):
+#         # TODO add doc
+#         """"""
+#
+#     def add_port(device_id, port):
+#         # TODO add doc
+#         """"""
+#
+#     def create_logical_device(logical_device):
+#         # TODO add doc
+#         """"""
+#
+#     def add_logical_port(logical_device_id, port):
+#         # TODO add doc
+#         """"""
+#
+#     def child_device_detected(parent_device_id,
+#                               parent_port_no,
+#                               child_device_type,
+#                               proxy_address,
+#                               admin_state,
+#                               **kw):
+#         # TODO add doc
+#         """"""
+#
+#     def send_proxied_message(proxy_address, msg):
+#         """
+#         Forward a msg to a child device of device, addressed by the given
+#         proxy_address=Device.ProxyAddress().
+#         :param proxy_address: Address info for the parent device
+#          to route the message to the child device. This was given to the
+#          child device by the parent device at the creation of the child
+#          device.
+#         :param msg: (str) The actual message to send.
+#         :return: (Deferred(None) or None) The return of this method should
+#          indicate that the message was successfully *sent*.
+#         """
+#
+#     def receive_proxied_message(proxy_address, msg):
+#         """
+#         Pass an async message (arrived via a proxy) to this device.
+#         :param proxy_address: Address info for the parent device
+#          to route the message to the child device. This was given to the
+#          child device by the parent device at the creation of the child
+#          device. Note this is the proxy_address with which the adapter
+#          had to register prior to receiving proxied messages.
+#         :param msg: (str) The actual message received.
+#         :return: None
+#         """
+#
+#     def register_for_proxied_messages(proxy_address):
+#         """
+#         A child device adapter can use this to indicate its intent to
+#         receive async messages sent via a parent device. Example: an
+#         ONU adapter can use this to register for OMCI messages received
+#         via the OLT and the OLT adapter.
+#         :param child_device_address: Address info that was given to the
+#          child device by the parent device at the creation of the child
+#          device. Its uniqueness acts as a router information for the
+#          registration.
+#         :return: None
+#         """
+#
+#     def unregister_for_proxied_messages(proxy_address):
+#         """
+#         Cancel a previous registration
+#         :return:
+#         """
+#
+#     def send_packet_in(logical_device_id, logical_port_no, packet):
+#         """
+#         Forward given packet to the northbound toward an SDN controller.
+#         :param device_id: logical device identifier
+#         :param logical_port_no: logical port_no (as numbered in openflow)
+#         :param packet: the actual packet; can be a serialized string or a scapy
+#                        Packet.
+#         :return: None returned on success
+#         """
+#
+#     def submit_kpis(kpi_event_msg):
+#         """
+#         Submit KPI metrics on behalf of the OLT and its adapter. This can
+#         include hardware related metrics, usage and utilization metrics, as
+#         well as optional adapter specific metrics.
+#         :param kpi_event_msg: A protobuf message of KpiEvent type.
+#         :return: None
+#         """
+#
+#     def submit_alarm(device_id, alarm_event_msg):
+#         """
+#         Submit an alarm on behalf of the OLT and its adapter.
+#         :param alarm_event_msg: A protobuf message of AlarmEvent type.
+#         :return: None
+#         """
+#
+#     def register_for_onu_detect_state(proxy_address):
+#         """
+#
+#         :return: None
+#         """
+#
+#     def unregister_for_onu_detect_state(proxy_address):
+#         """
+#
+#         :return: None
+#         """
+#
+#     def forward_onu_detect_state(proxy_address, state):
+#         """
+#         Forward onu detect state to ONU adapter
+#         :param proxy_address: ONU device address
+#         :param state: ONU detect state (bool)
+#         :return: None
+#         """
+
+class ICoreSouthBoundInterface(Interface):
+    """
+    Represents a Voltha Core. This is used by an adapter to initiate async
+    calls towards Voltha Core.
+    """
+
+    def get_device(device_id):
+        """
+        Retrieve a device using its ID.
+        :param device_id: a device ID
+        :return: Device Object or None
+        """
+
+    def get_child_device(parent_device_id, **kwargs):
+        """
+        Retrieve a child device object belonging to the specified parent
+        device based on some match criteria. The first child device that
+        matches the provided criteria is returned.
+        :param parent_device_id: parent's device protobuf ID
+        :param **kwargs: arbitrary set of match criteria where the value
+        in each key-value pair must be a protobuf type
+        :return: Child Device Object or None
+        """
+
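+    # For example (field names below are purely illustrative), an OLT
+    # adapter might look up an ONU with:
+    #   core.get_child_device(parent_device_id,
+    #                         onu_id=onu_id_proto, serial_number=serial_proto)
+    # where each value is itself a protobuf message, per the contract above.
+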
+    def get_ports(device_id, port_type):
+        """
+        Retrieve all the ports of a given type of a Device.
+        :param device_id: a device ID
+        :param port_type: type of port
+        :return: Ports object
+        """
+
+    def get_child_devices(parent_device_id):
+        """
+        Get all child devices given a parent device id
+        :param parent_device_id: The parent device ID
+        :return: Devices object
+        """
+
+    def get_child_device_with_proxy_address(proxy_address):
+        """
+        Get a child device based on its proxy address. Proxy address is
+        defined as {parent id, channel_id}
+        :param proxy_address: A Device.ProxyAddress object
+        :return: Device object or None
+        """
+
+    def device_state_update(device_id,
+                            oper_status=None,
+                            connect_status=None):
+        """
+        Update a device state.
+        :param device_id: The device ID
+        :param oper_status: Operational status of the device
+        :param connect_status: Connection status of the device
+        :return: None
+        """
+
+
+    def child_device_detected(parent_device_id,
+                              parent_port_no,
+                              child_device_type,
+                              channel_id,
+                              **kw):
+        """
+        A child device has been detected.  Core will create the device along
+        with its unique ID.
+        :param parent_device_id: The parent device ID
+        :param parent_port_no: The parent port number
+        :param child_device_type: The child device type
+        :param channel_id: A unique identifier for that child device within
+        the parent device (e.g. vlan_id)
+        :param kw: A dictionary of key-value pairs where each value is a
+        protobuf message
+        :return: None
+        """
+
+    def device_update(device):
+        """
+        Event corresponding to a device update.
+        :param device: Device Object
+        :return: None
+        """
+
+    def child_device_removed(parent_device_id, child_device_id):
+        """
+        Event indicating a child device has been removed from a parent.
+        :param parent_device_id: Device ID of the parent
+        :param child_device_id: Device ID of the child
+        :return: None
+        """
+
+    def child_devices_state_update(parent_device_id,
+                                   oper_status=None,
+                                   connect_status=None,
+                                   admin_status=None):
+        """
+        Event indicating that the status of all child devices has changed.
+        :param parent_device_id: Device ID of the parent
+        :param oper_status: Operational status
+        :param connect_status: Connection status
+        :param admin_status: Admin status
+        :return: None
+        """
+
+    def child_devices_removed(parent_device_id):
+        """
+        Event indicating all child devices have been removed from a parent.
+        :param parent_device_id: Device ID of the parent device
+        :return: None
+        """
+
+    def device_pm_config_update(device_pm_config, init=False):
+        """
+        Event corresponding to a PM config update of a device.
+        :param device_pm_config: a PmConfigs object
+        :param init: True indicates initializing stage
+        :return: None
+        """
+
+    def port_created(device_id, port):
+        """
+        A port has been created and needs to be added to a device.
+        :param device_id: a device ID
+        :param port: Port object
+        :return: None
+        """
+
+    def port_removed(device_id, port):
+        """
+        A port has been removed and it needs to be removed from a Device.
+        :param device_id: a device ID
+        :param port: a Port object
+        :return: None
+        """
+
+    def ports_enabled(device_id):
+        """
+        All ports on that device have been re-enabled. The Core will change
+        the admin state to ENABLED and operational state to ACTIVE for all
+        ports on that device.
+        :param device_id: a device ID
+        :return: None
+        """
+
+    def ports_disabled(device_id):
+        """
+        All ports on that device have been disabled. The Core will change the
+        admin status to DISABLED and operational state to UNKNOWN for all
+        ports on that device.
+        :param device_id: a device ID
+        :return: None
+        """
+
+    def ports_oper_status_update(device_id, oper_status):
+        """
+        The operational status of all ports of a Device has been changed.
+        The Core will update the operational status for all ports on the
+        device.
+        :param device_id: a device ID
+        :param oper_status: operational Status
+        :return: None
+        """
+
+    def image_download_update(img_dnld):
+        """
+        Event corresponding to an image download update.
+        :param img_dnld: a ImageDownload object
+        :return: None
+        """
+
+    def image_download_deleted(img_dnld):
+        """
+        Event corresponding to the deletion of a downloaded image. The
+        references to this image need to be removed from the Core.
+        :param img_dnld: a ImageDownload object
+        :return: None
+        """
+
+    def packet_in(device_id, egress_port_no, packet):
+        """
+        Send a packet to the SDN controller via the Voltha Core
+        :param device_id: The OLT device ID
+        :param egress_port_no: The port number representing the ONU (cvid)
+        :param packet: The actual packet
+        :return: None
+        """
+
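+    # Illustrative call sequence only (values are hypothetical): an OLT
+    # adapter that detects a new ONU might drive the Core as follows:
+    #
+    #   core.child_device_detected(parent_device_id=olt_id,
+    #                              parent_port_no=1,
+    #                              child_device_type='ponsim_onu',
+    #                              channel_id=101)
+    #   core.device_state_update(olt_id,
+    #                            oper_status=OperStatus.ACTIVE,
+    #                            connect_status=ConnectStatus.REACHABLE)
+    #   core.port_created(olt_id, port)
+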
+    # def add_device(device):
+    #     # TODO add doc
+    #     """"""
+
+    # def update_device(device):
+    #     # TODO add doc
+    #     """"""
+
+    # def add_port(device_id, port):
+    #     # TODO add doc
+    #     """"""
+
+    # def create_logical_device(logical_device):
+    #     # TODO add doc
+    #     """"""
+    #
+    # def add_logical_port(logical_device_id, port):
+    #     # TODO add doc
+    #     """"""
+
+    # def child_device_detected(parent_device_id,
+    #                           parent_port_no,
+    #                           child_device_type,
+    #                           proxy_address,
+    #                           admin_state,
+    #                           **kw):
+    #     # TODO add doc
+    #     """"""
+
+    # def send_proxied_message(proxy_address, msg):
+    #     """
+    #     Forward a msg to a child device of device, addressed by the given
+    #     proxy_address=Device.ProxyAddress().
+    #     :param proxy_address: Address info for the parent device
+    #      to route the message to the child device. This was given to the
+    #      child device by the parent device at the creation of the child
+    #      device.
+    #     :param msg: (str) The actual message to send.
+    #     :return: (Deferred(None) or None) The return of this method should
+    #      indicate that the message was successfully *sent*.
+    #     """
+    #
+    # def receive_proxied_message(proxy_address, msg):
+    #     """
+    #     Pass an async message (arrived via a proxy) to this device.
+    #     :param proxy_address: Address info for the parent device
+    #      to route the message to the child device. This was given to the
+    #      child device by the parent device at the creation of the child
+    #      device. Note this is the proxy_address with which the adapter
+    #      had to register prior to receiving proxied messages.
+    #     :param msg: (str) The actual message received.
+    #     :return: None
+    #     """
+    #
+    # def register_for_proxied_messages(proxy_address):
+    #     """
+    #     A child device adapter can use this to indicate its intent to
+    #     receive async messages sent via a parent device. Example: an
+    #     ONU adapter can use this to register for OMCI messages received
+    #     via the OLT and the OLT adapter.
+    #     :param child_device_address: Address info that was given to the
+    #      child device by the parent device at the creation of the child
+    #      device. Its uniqueness acts as a router information for the
+    #      registration.
+    #     :return: None
+    #     """
+    #
+    # def unregister_for_proxied_messages(proxy_address):
+    #     """
+    #     Cancel a previous registration
+    #     :return:
+    #     """
+    #
+    # def submit_kpis(kpi_event_msg):
+    #     """
+    #     Submit KPI metrics on behalf of the OLT and its adapter. This can
+    #     include hardware related metrics, usage and utilization metrics, as
+    #     well as optional adapter specific metrics.
+    #     :param kpi_event_msg: A protobuf message of KpiEvent type.
+    #     :return: None
+    #     """
+    #
+    # def submit_alarm(device_id, alarm_event_msg):
+    #     """
+    #     Submit an alarm on behalf of the OLT and its adapter.
+    #     :param alarm_event_msg: A protobuf message of AlarmEvent type.
+    #     :return: None
+    #     """
+
+    # def register_for_onu_detect_state(proxy_address):
+    #     """
+    #
+    #     :return: None
+    #     """
+    #
+    # def unregister_for_onu_detect_state(proxy_address):
+    #     """
+    #
+    #     :return: None
+    #     """
+    #
+    # def forward_onu_detect_state(proxy_address, state):
+    #     """
+    #     Forward onu detect state to ONU adapter
+    #     :param proxy_address: ONU device address
+    #     :param state: ONU detect state (bool)
+    #     :return: None
+    #     """
+    #
+    # def send_packet_in(logical_device_id, logical_port_no, packet):
+    #     """
+    #     Forward given packet to the northbound toward an SDN controller.
+    #     :param device_id: logical device identifier
+    #     :param logical_port_no: logical port_no (as numbered in openflow)
+    #     :param packet: the actual packet; can be a serialized string or a
+    #     scapy Packet.
+    #     :return: None returned on success
+    #     """
\ No newline at end of file
diff --git a/adapters/kafka/__init__.py b/adapters/kafka/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/adapters/kafka/__init__.py
diff --git a/adapters/kafka/adapter_request_facade.py b/adapters/kafka/adapter_request_facade.py
new file mode 100644
index 0000000..74ed934
--- /dev/null
+++ b/adapters/kafka/adapter_request_facade.py
@@ -0,0 +1,168 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Agent to play gateway between CORE and an individual adapter.
+"""
+from uuid import uuid4
+
+import arrow
+import structlog
+from google.protobuf.json_format import MessageToJson
+from scapy.packet import Packet
+from twisted.internet.defer import inlineCallbacks, returnValue
+from zope.interface import implementer
+
+from adapters.common.event_bus import EventBusClient
+from adapters.common.frameio.frameio import hexify
+from adapters.common.utils.id_generation import create_cluster_logical_device_ids
+from adapters.interface import IAdapterInterface
+
+from adapters.protos import third_party
+from adapters.protos.device_pb2 import Device, Port, PmConfigs
+from adapters.protos.events_pb2 import AlarmEvent, AlarmEventType, \
+    AlarmEventSeverity, AlarmEventState, AlarmEventCategory
+from adapters.protos.events_pb2 import KpiEvent
+from adapters.protos.voltha_pb2 import DeviceGroup, LogicalDevice, \
+    LogicalPort, AdminState, OperStatus, AlarmFilterRuleKey
+from adapters.common.utils.registry import registry
+from adapters.common.utils.id_generation import create_cluster_device_id
+from adapters.protos.core_adapter_pb2 import IntType
+import re
+
+log = structlog.get_logger()
+
+class MacAddressError(BaseException):
+    def __init__(self, error):
+        self.error = error
+
+
+class IDError(BaseException):
+    def __init__(self, error):
+        self.error = error
+
+
+@implementer(IAdapterInterface)
+class AdapterRequestFacade(object):
+    """
+    Gate-keeper between CORE and device adapters.
+
+    On one side it interacts with Core's internal model and update/dispatch
+    mechanisms.
+
+    On the other side, it interacts with the adapters' standard interface as
+    defined in IAdapterInterface.
+    """
+
+    def __init__(self, adapter):
+        self.adapter = adapter
+
+    def start(self):
+        log.debug('starting')
+
+    def stop(self):
+        log.debug('stopping')
+
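+    # Note: the Core sends request arguments packed as protobuf Any
+    # messages; handlers such as adopt_device and get_ofp_port_info
+    # therefore Unpack() them into their concrete types (Device, IntType)
+    # before delegating to the adapter.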
+    def adopt_device(self, device):
+        d = Device()
+        if device:
+            device.Unpack(d)
+            return (True, self.adapter.adopt_device(d))
+        else:
+            return (False, d)
+
+    def get_ofp_device_info(self, device):
+        d = Device()
+        if device:
+            device.Unpack(d)
+            return (True, self.adapter.get_ofp_device_info(d))
+        else:
+            return (False, d)
+
+    def get_ofp_port_info(self, device, port_no):
+        d = Device()
+        if device:
+            device.Unpack(d)
+        else:
+            return (False, d)
+
+        p = IntType()
+        port_no.Unpack(p)
+
+        return (True, self.adapter.get_ofp_port_info(d, p.val))
+
+
+    def reconcile_device(self, device):
+        return self.adapter.reconcile_device(device)
+
+    def abandon_device(self, device):
+        return self.adapter.abandon_device(device)
+
+    def disable_device(self, device):
+        return self.adapter.disable_device(device)
+
+    def reenable_device(self, device):
+        return self.adapter.reenable_device(device)
+
+    def reboot_device(self, device):
+        d = Device()
+        if device:
+            device.Unpack(d)
+            return (True, self.adapter.reboot_device(d))
+        else:
+            return (False, d)
+
+    def download_image(self, device, request):
+        return self.adapter.download_image(device, request)
+
+    def get_image_download_status(self, device, request):
+        return self.adapter.get_image_download_status(device, request)
+
+    def cancel_image_download(self, device, request):
+        return self.adapter.cancel_image_download(device, request)
+
+    def activate_image_update(self, device, request):
+        return self.adapter.activate_image_update(device, request)
+
+    def revert_image_update(self, device, request):
+        return self.adapter.revert_image_update(device, request)
+
+    def self_test(self, device):
+        return self.adapter.self_test_device(device)
+
+    def delete_device(self, device):
+        # Remove all child devices
+        self.delete_all_child_devices(device.id)
+
+        return self.adapter.delete_device(device)
+
+    def get_device_details(self, device):
+        return self.adapter.get_device_details(device)
+
+    def update_flows_bulk(self, device, flows, groups):
+        return self.adapter.update_flows_bulk(device, flows, groups)
+
+    def update_flows_incrementally(self, device, flow_changes, group_changes):
+        return self.adapter.update_flows_incrementally(device, flow_changes, group_changes)
+
+    def suppress_alarm(self, filter):
+        return self.adapter.suppress_alarm(filter)
+
+    def unsuppress_alarm(self, filter):
+        return self.adapter.unsuppress_alarm(filter)
+
diff --git a/adapters/kafka/core_proxy.py b/adapters/kafka/core_proxy.py
new file mode 100644
index 0000000..bcc4239
--- /dev/null
+++ b/adapters/kafka/core_proxy.py
@@ -0,0 +1,331 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Agent to play gateway between CORE and an individual adapter.
+"""
+from uuid import uuid4
+
+import arrow
+import structlog
+from google.protobuf.json_format import MessageToJson
+from scapy.packet import Packet
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.python import failure
+from zope.interface import implementer
+
+from adapters.common.event_bus import EventBusClient
+from adapters.common.frameio.frameio import hexify
+from adapters.common.utils.id_generation import create_cluster_logical_device_ids
+from adapters.interface import IAdapterInterface
+from adapters.protos import third_party
+from adapters.protos.device_pb2 import Device, Port, PmConfigs
+from adapters.protos.events_pb2 import AlarmEvent, AlarmEventType, \
+    AlarmEventSeverity, AlarmEventState, AlarmEventCategory
+from adapters.protos.events_pb2 import KpiEvent
+from adapters.protos.voltha_pb2 import DeviceGroup, LogicalDevice, \
+    LogicalPort, AdminState, OperStatus, AlarmFilterRuleKey, CoreInstance
+from adapters.common.utils.registry import registry, IComponent
+from adapters.common.utils.id_generation import create_cluster_device_id
+import re
+from adapters.interface import ICoreSouthBoundInterface
+from adapters.protos.core_adapter_pb2 import StrType, BoolType, IntType
+from adapters.protos.common_pb2 import ID
+from google.protobuf.message import Message
+from adapters.common.utils.deferred_utils import DeferredWithTimeout, TimeOutError
+
+log = structlog.get_logger()
+
+class KafkaMessagingError(BaseException):
+    def __init__(self, error):
+        self.error = error
+
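+# Decorator applied to the CoreProxy request methods below: it awaits the
+# (success, payload) tuple produced by the wrapped call and, on success,
+# unpacks the returned protobuf Any into an instance of return_cls
+# (or returns None when return_cls is None).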
+def wrap_request(return_cls):
+    def real_wrapper(func):
+        @inlineCallbacks
+        def wrapper(*args, **kw):
+            try:
+                (success, d) = yield func(*args, **kw)
+                if success:
+                    log.debug("successful-response", func=func, val=d)
+                    if return_cls is not None:
+                        rc = return_cls()
+                        if d is not None:
+                            d.Unpack(rc)
+                        returnValue(rc)
+                    else:
+                        log.debug("successful-response-none", func=func,
+                                  val=None)
+                        returnValue(None)
+                else:
+                    log.warn("unsuccessful-request", func=func, args=args, kw=kw)
+                    returnValue(d)
+            except Exception as e:
+                log.exception("request-wrapper-exception", func=func, e=e)
+                raise
+        return wrapper
+    return real_wrapper
+
+
+@implementer(IComponent, ICoreSouthBoundInterface)
+class CoreProxy(object):
+
+    def __init__(self, kafka_proxy, core_topic, my_listening_topic):
+        self.kafka_proxy = kafka_proxy
+        self.listening_topic = my_listening_topic
+        self.core_topic = core_topic
+        self.default_timeout = 3
+
+    def start(self):
+        log.info('started')
+
+        return self
+
+    def stop(self):
+        log.info('stopped')
+
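+    # invoke() is the single request path toward the Core: it sends the rpc
+    # and its protobuf arguments to core_topic (or to_topic when given) via
+    # the kafka proxy, then waits on a DeferredWithTimeout
+    # (self.default_timeout seconds) for the response published back on
+    # listening_topic.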
+    @inlineCallbacks
+    def invoke(self, rpc, to_topic=None, **kwargs):
+        @inlineCallbacks
+        def _send_request(rpc, m_callback, to_topic, **kwargs):
+            try:
+                log.debug("sending-request", rpc=rpc)
+                if to_topic is None:
+                    to_topic = self.core_topic
+                result = yield self.kafka_proxy.send_request(rpc=rpc,
+                                                             to_topic=to_topic,
+                                                             reply_topic=self.listening_topic,
+                                                             callback=None,
+                                                             **kwargs)
+                if not m_callback.called:
+                    m_callback.callback(result)
+                else:
+                    log.debug('timeout-already-occurred', rpc=rpc)
+            except Exception as e:
+                log.exception("Failure-sending-request", rpc=rpc, kw=kwargs)
+                if not m_callback.called:
+                    m_callback.errback(failure.Failure())
+
+        log.debug('invoke-request', rpc=rpc)
+        cb = DeferredWithTimeout(timeout=self.default_timeout)
+        _send_request(rpc, cb, to_topic, **kwargs)
+        try:
+            res = yield cb
+            returnValue(res)
+        except TimeOutError as e:
+            log.warn('invoke-timeout', e=e)
+            raise e
+
+
+    @wrap_request(CoreInstance)
+    @inlineCallbacks
+    def register(self, adapter):
+        log.debug("register")
+        try:
+            res = yield self.invoke(rpc="Register", adapter=adapter)
+            log.info("registration-returned", res=res)
+            returnValue(res)
+        except Exception as e:
+            log.exception("registration-exception", e=e)
+            raise
+
+    @wrap_request(Device)
+    @inlineCallbacks
+    def get_device(self, device_id):
+        log.debug("get-device")
+        id = ID()
+        id.id = device_id
+        res = yield self.invoke(rpc="GetDevice", device_id=id)
+        returnValue(res)
+
+    @wrap_request(Device)
+    @inlineCallbacks
+    def get_child_device(self, parent_device_id, **kwargs):
+        raise NotImplementedError()
+
+    # def add_device(self, device):
+    #     raise NotImplementedError()
+
+    def get_ports(self, device_id, port_type):
+        raise NotImplementedError()
+
+    def get_child_devices(self, parent_device_id):
+        raise NotImplementedError()
+
+    def get_child_device_with_proxy_address(self, proxy_address):
+        raise NotImplementedError()
+
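+    # Helper for the request methods below: wraps plain python int, str and
+    # bool keyword arguments into their IntType/StrType/BoolType protobuf
+    # equivalents so they can be packed into the kafka request, while
+    # protobuf Messages are passed through unchanged.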
+    def _to_proto(self, **kwargs):
+        encoded = {}
+        for k,v in kwargs.iteritems():
+            if isinstance(v, Message):
+                encoded[k] = v
+            elif type(v) == int:
+                i_proto = IntType()
+                i_proto.val = v
+                encoded[k] = i_proto
+            elif type(v) == str:
+                s_proto = StrType()
+                s_proto.val = v
+                encoded[k] = s_proto
+            elif type(v) == bool:
+                b_proto = BoolType()
+                b_proto.val = v
+                encoded[k] = b_proto
+        return encoded
+
+
+    @wrap_request(None)
+    @inlineCallbacks
+    def child_device_detected(self,
+                              parent_device_id,
+                              parent_port_no,
+                              child_device_type,
+                              channel_id,
+                              **kw):
+        id = ID()
+        id.id = parent_device_id
+        ppn = IntType()
+        ppn.val = parent_port_no
+        cdt = StrType()
+        cdt.val = child_device_type
+        channel = IntType()
+        channel.val = channel_id
+
+        args = self._to_proto(**kw)
+        res = yield self.invoke(rpc="ChildDeviceDetected",
+                                parent_device_id=id,
+                                parent_port_no = ppn,
+                                child_device_type= cdt,
+                                channel_id=channel,
+                                **args)
+        returnValue(res)
+
+
+    @wrap_request(None)
+    @inlineCallbacks
+    def device_update(self, device):
+        log.debug("device_update")
+        res = yield self.invoke(rpc="DeviceUpdate", device=device)
+        returnValue(res)
+
+    def child_device_removed(self, parent_device_id, child_device_id):
+        raise NotImplementedError()
+
+
+    @wrap_request(None)
+    @inlineCallbacks
+    def device_state_update(self, device_id,
+                                   oper_status=None,
+                                   connect_status=None):
+
+        id = ID()
+        id.id = device_id
+        o_status = IntType()
+        if oper_status:
+            o_status.val = oper_status
+        else:
+            o_status.val = -1
+        c_status = IntType()
+        if connect_status:
+            c_status.val = connect_status
+        else:
+            c_status.val = -1
+
+        res = yield self.invoke(rpc="DeviceStateUpdate",
+                                device_id=id,
+                                oper_status=o_status,
+                                connect_status=c_status)
+        returnValue(res)
+
+    @wrap_request(None)
+    @inlineCallbacks
+    def child_devices_state_update(self, parent_device_id,
+                                   oper_status=None,
+                                   connect_status=None,
+                                   admin_state=None):
+
+        id = ID()
+        id.id = parent_device_id
+        o_status = IntType()
+        if oper_status:
+            o_status.val = oper_status
+        else:
+            o_status.val = -1
+        c_status = IntType()
+        if connect_status:
+            c_status.val = connect_status
+        else:
+            c_status.val = -1
+        a_status = IntType()
+        if admin_state:
+            a_status.val = admin_state
+        else:
+            a_status.val = -1
+
+        res = yield self.invoke(rpc="child_devices_state_update",
+                                parent_device_id=id,
+                                oper_status=o_status,
+                                connect_status=c_status,
+                                admin_state=a_status)
+        returnValue(res)
+
+
+    def child_devices_removed(self, parent_device_id):
+        raise NotImplementedError()
+
+
+    @wrap_request(None)
+    @inlineCallbacks
+    def device_pm_config_update(self, device_pm_config, init=False):
+        log.debug("device_pm_config_update")
+        b = BoolType()
+        b.val = init
+        res = yield self.invoke(rpc="DevicePMConfigUpdate",
+                                device_pm_config=device_pm_config, init=b)
+        returnValue(res)
+
+    @wrap_request(None)
+    @inlineCallbacks
+    def port_created(self, device_id, port):
+        log.debug("port_created")
+        proto_id = ID()
+        proto_id.id = device_id
+        res = yield self.invoke(rpc="PortCreated", device_id=proto_id, port=port)
+        returnValue(res)
+
+
+    def port_removed(self, device_id, port):
+        raise NotImplementedError()
+
+    def ports_enabled(self, device_id):
+        raise NotImplementedError()
+
+    def ports_disabled(self, device_id):
+        raise NotImplementedError()
+
+    def ports_oper_status_update(self, device_id, oper_status):
+        raise NotImplementedError()
+
+    def image_download_update(self, img_dnld):
+        raise NotImplementedError()
+
+    def image_download_deleted(self, img_dnld):
+        raise NotImplementedError()
+
+    def packet_in(self, device_id, egress_port_no, packet):
+        raise NotImplementedError()
diff --git a/adapters/kafka/event_bus_publisher.py b/adapters/kafka/event_bus_publisher.py
new file mode 100644
index 0000000..011fdea
--- /dev/null
+++ b/adapters/kafka/event_bus_publisher.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+A gateway between the internal event bus and the Kafka publisher proxy
+to publish select topics and messages posted to the Voltha-internal event
+bus toward the external world.
+"""
+import structlog
+from google.protobuf.json_format import MessageToDict
+from google.protobuf.message import Message
+from simplejson import dumps
+
+from adapters.common.event_bus import EventBusClient
+
+log = structlog.get_logger()
+
+
+class EventBusPublisher(object):
+
+    def __init__(self, kafka_proxy, config):
+        self.kafka_proxy = kafka_proxy
+        self.config = config
+        self.topic_mappings = config.get('topic_mappings', {})
+        self.event_bus = EventBusClient()
+        self.subscriptions = None
+
+    def start(self):
+        log.debug('starting')
+        self.subscriptions = list()
+        self._setup_subscriptions(self.topic_mappings)
+        log.info('started')
+        return self
+
+    def stop(self):
+        try:
+            log.debug('stopping-event-bus')
+            if self.subscriptions:
+                for subscription in self.subscriptions:
+                    self.event_bus.unsubscribe(subscription)
+            log.info('stopped-event-bus')
+        except Exception as e:
+            log.exception('failed-stopping-event-bus', e=e)
+            return
+
+    def _setup_subscriptions(self, mappings):
+
+        for event_bus_topic, mapping in mappings.iteritems():
+
+            kafka_topic = mapping.get('kafka_topic', None)
+
+            if kafka_topic is None:
+                log.error('no-kafka-topic-in-config',
+                          event_bus_topic=event_bus_topic,
+                          mapping=mapping)
+                continue
+
+            self.subscriptions.append(self.event_bus.subscribe(
+                event_bus_topic,
+                # to avoid Python late-binding to the last registered
+                # kafka_topic, we force instant binding with the default arg
+                lambda _, m, k=kafka_topic: self.forward(k, m)))
+
+            log.info('event-to-kafka', kafka_topic=kafka_topic,
+                     event_bus_topic=event_bus_topic)
+
+    def forward(self, kafka_topic, msg):
+        try:
+            # convert to JSON string if msg is a protobuf msg
+            if isinstance(msg, Message):
+                msg = dumps(MessageToDict(msg, True, True))
+            log.debug('forward-event-bus-publisher')
+            self.kafka_proxy.send_message(kafka_topic, msg)
+        except Exception as e:
+            log.exception('failed-forward-event-bus-publisher', e=e)
+
diff --git a/adapters/kafka/kafka_inter_container_library.py b/adapters/kafka/kafka_inter_container_library.py
new file mode 100644
index 0000000..ad53812
--- /dev/null
+++ b/adapters/kafka/kafka_inter_container_library.py
@@ -0,0 +1,584 @@
+#!/usr/bin/env python
+
+from zope.interface import Interface, implementer
+from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue, Deferred, \
+    DeferredQueue, gatherResults
+from afkak.client import KafkaClient
+from afkak.consumer import OFFSET_LATEST, Consumer
+import structlog
+from adapters.common.utils.asleep import asleep
+from adapters.protos.core_adapter_pb2 import MessageType, Argument, \
+    InterContainerRequestBody, InterContainerMessage, Header, InterContainerResponseBody
+import time
+from uuid import uuid4
+from adapters.common.utils.registry import IComponent
+
+
+log = structlog.get_logger()
+
+class KafkaMessagingError(BaseException):
+    def __init__(self, error):
+        self.error = error
+
+@implementer(IComponent)
+class IKafkaMessagingProxy(object):
+    _kafka_messaging_instance = None
+
+    def __init__(self,
+                 kafka_host_port,
+                 kv_store,
+                 default_topic,
+                 target_cls):
+        """
+        Initialize the kafka proxy.  This is a singleton (may change to
+        non-singleton if performance is better)
+        :param kafka_host_port: Kafka host and port
+        :param kv_store: Key-Value store
+        :param default_topic: Default topic to subscribe to
+        :param target_cls: target class - method of that class is invoked
+        when a message is received on the default_topic
+        """
+        # raise an exception if the singleton instance already exists
+        if IKafkaMessagingProxy._kafka_messaging_instance:
+            raise Exception(
+                'singleton-already-exists: {}'.format(IKafkaMessagingProxy))
+
+        log.debug("Initializing-KafkaProxy")
+        self.kafka_host_port = kafka_host_port
+        self.kv_store = kv_store
+        self.default_topic = default_topic
+        self.target_cls = target_cls
+        self.topic_target_cls_map = {}
+        self.topic_consumer_map = {}
+        self.topic_callback_map = {}
+        self.subscribers = {}
+        self.kafka_client = None
+        self.kafka_proxy = None
+        self.transaction_id_deferred_map = {}
+        self.received_msg_queue = DeferredQueue()
+
+        self.init_time = 0
+        self.init_received_time = 0
+
+        self.init_resp_time = 0
+        self.init_received_resp_time = 0
+
+        self.num_messages = 0
+        self.total_time = 0
+        self.num_responses = 0
+        self.total_time_responses = 0
+        log.debug("KafkaProxy-initialized")
+
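+    # Illustrative construction sketch (endpoint and topic values shown
+    # here are placeholders only):
+    #   proxy = IKafkaMessagingProxy(kafka_host_port='10.100.198.220:9092',
+    #                                kv_store=None,
+    #                                default_topic='ponsim_olt_adapter',
+    #                                target_cls=adapter_request_facade)
+    #   proxy.start()
+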
+    def start(self):
+        try:
+            # Create the kafka client
+            # assert self.kafka_host is not None
+            # assert self.kafka_port is not None
+            # kafka_host_port = ":".join((self.kafka_host, self.kafka_port))
+            self.kafka_client = KafkaClient(self.kafka_host_port)
+
+            # Get the kafka proxy instance.  If it does not exist then
+            # create it
+            self.kafka_proxy = get_kafka_proxy()
+            if self.kafka_proxy is None:
+                KafkaProxy(kafka_endpoint=self.kafka_host_port).start()
+                self.kafka_proxy = get_kafka_proxy()
+
+            # Subscribe the default topic and target_cls
+            self.topic_target_cls_map[self.default_topic] = self.target_cls
+
+            # Start the queue to handle incoming messages
+            reactor.callLater(0, self._received_message_processing_loop)
+
+            # Start listening for incoming messages
+            reactor.callLater(0, self.subscribe, self.default_topic,
+                              target_cls=self.target_cls)
+
+            # Setup the singleton instance
+            IKafkaMessagingProxy._kafka_messaging_instance = self
+        except Exception as e:
+            log.exception("Failed-to-start-proxy", e=e)
+
+
+    def stop(self):
+        """
+        Invoked to stop the kafka proxy
+        :return: None on success, Exception on failure
+        """
+        log.debug("Stopping-messaging-proxy ...")
+        try:
+            # Stop all the consumers
+            deferred_list = []
+            for key, values in self.topic_consumer_map.iteritems():
+                deferred_list.extend([c.stop() for c in values])
+
+            if deferred_list:
+                d = gatherResults(deferred_list)
+                d.addCallback(lambda result: self.kafka_client.close())
+            else:
+                self.kafka_client.close()
+            log.debug("Messaging-proxy-stopped.")
+        except Exception as e:
+            log.exception("Exception-when-stopping-messaging-proxy:", e=e)
+
+
+    @inlineCallbacks
+    def _wait_until_topic_is_ready(self, client, topic):
+        e = True
+        while e:
+            yield client.load_metadata_for_topics(topic)
+            e = client.metadata_error_for_topic(topic)
+            if e:
+                log.debug("Topic-not-ready-retrying...", topic=topic)
+
+    def _clear_backoff(self):
+        if self.retries:
+            log.info('reconnected-to-consul', after_retries=self.retries)
+            self.retries = 0
+
+    @inlineCallbacks
+    def _subscribe(self, topic, callback=None, target_cls=None):
+        try:
+            yield self._wait_until_topic_is_ready(self.kafka_client, topic)
+            partitions = self.kafka_client.topic_partitions[topic]
+            consumers = []
+
+            # First setup the generic callback - all received messages will
+            # go through that queue
+            if topic not in self.topic_consumer_map:
+                consumers = [Consumer(self.kafka_client, topic, partition,
+                                      self._enqueue_received_message)
+                             for partition in partitions]
+                self.topic_consumer_map[topic] = consumers
+
+            log.debug("_subscribe", topic=topic, consumermap=self.topic_consumer_map)
+
+            if target_cls is not None and callback is None:
+                # Scenario #1
+                if topic not in self.topic_target_cls_map:
+                    self.topic_target_cls_map[topic] = target_cls
+            elif target_cls is None and callback is not None:
+                # Scenario #2
+                log.debug("custom-callback", topic=topic,
+                          callback_map=self.topic_callback_map)
+                if topic not in self.topic_callback_map:
+                    self.topic_callback_map[topic] = [callback]
+                else:
+                    self.topic_callback_map[topic].extend([callback])
+            else:
+                log.warn("invalid-parameters")
+
+            def cb_closed(result):
+                """
+                Called when a consumer cleanly stops.
+                """
+                log.debug("Consumers-cleanly-stopped")
+
+            def eb_failed(failure):
+                """
+                Called when a consumer fails due to an uncaught exception in the
+                processing callback or a network error on shutdown. In this case we
+                simply log the error.
+                """
+                log.warn("Consumers-failed", failure=failure)
+
+            for c in consumers:
+                c.start(OFFSET_LATEST).addCallbacks(cb_closed, eb_failed)
+
+            returnValue(True)
+        except Exception as e:
+            log.exception("Exception-during-subscription", e=e)
+            returnValue(False)
+
+    @inlineCallbacks
+    def subscribe(self, topic, callback=None, target_cls=None,
+                  max_retry=3):
+        """
+        Scenario 1:  invoked to subscribe to a specific topic with a
+        target_cls to invoke when a message is received on that topic.  This
+        handles the request/response case where this library performs the
+        heavy lifting. In this case the callback must be None.
+
+        Scenario 2:  invoked to subscribe to a specific topic with a
+        specific callback to invoke when a message is received on that topic.
+        This handles the case where the caller wants to process the received
+        message itself. In this case the target_cls must be None.
+
+        :param topic: topic to subscribe to
+        :param callback: Callback to invoke when a message is received on
+        the topic. Exactly one of callback or target_cls must be provided.
+        :param target_cls:  Target class to use when a message is
+        received on the topic. There can only be one target_cls per topic.
+        Exactly one of callback or target_cls must be provided.
+        :param max_retry:  the number of retries before reporting failure
+        to subscribe.  This caters for the scenario where the kafka topic
+        is not yet ready.
+        :return: (Deferred) True on success, False on failure
+        """
+        RETRY_BACKOFF = [0.05, 0.1, 0.2, 0.5, 1, 2, 5]
+
+        def _backoff(msg, retries):
+            wait_time = RETRY_BACKOFF[min(retries,
+                                          len(RETRY_BACKOFF) - 1)]
+            log.info(msg, retry_in=wait_time)
+            return asleep(wait_time)
+
+        retry = 0
+        subscribed = yield self._subscribe(topic, callback=callback,
+                                           target_cls=target_cls)
+        while not subscribed:
+            if retry > max_retry:
+                returnValue(False)
+            yield _backoff("subscription-not-complete", retry)
+            retry += 1
+            subscribed = yield self._subscribe(topic, callback=callback,
+                                               target_cls=target_cls)
+        returnValue(True)
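+
+    # Illustrative usage only (topic names and handlers are hypothetical):
+    #   proxy.subscribe("ponsim_olt_request", target_cls=request_handler)  # scenario 1
+    #   proxy.subscribe("voltha.kpis", callback=on_kpi_message)            # scenario 2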
+
+    def unsubscribe(self, topic):
+        """
+        Invoked when unsubscribing from a topic
+        :param topic: topic to unsubscribe from
+        :return: None on success or Exception on failure
+        """
+        log.debug("Unsubscribing-to-topic", topic=topic)
+
+        def remove_topic(_result, topic):
+            if topic in self.topic_consumer_map:
+                del self.topic_consumer_map[topic]
+
+        try:
+            if topic in self.topic_consumer_map:
+                consumers = self.topic_consumer_map[topic]
+                d = gatherResults([c.stop() for c in consumers])
+                d.addCallback(remove_topic, topic)
+                log.debug("Unsubscribed-to-topic.", topic=topic)
+            else:
+                log.debug("Topic-does-not-exist.", topic=topic)
+        except Exception as e:
+            log.exception("Exception-when-stopping-messaging-proxy:", e=e)
+
+    @inlineCallbacks
+    def _enqueue_received_message(self, reactor, message_list):
+        """
+        Internal method to continuously queue all received messages
+        irrespective of topic
+        :param reactor: A requirement by the Twisted Python kafka library
+        :param message_list: Received list of messages
+        :return: None on success, Exception on failure
+        """
+        try:
+            for m in message_list:
+                log.debug("received-msg", msg=m)
+                yield self.received_msg_queue.put(m)
+        except Exception as e:
+            log.exception("Failed-enqueueing-received-message", e=e)
+
+    @inlineCallbacks
+    def _received_message_processing_loop(self):
+        """
+        Internal method to continuously process all received messages one
+        at a time
+        :return: None on success, Exception on failure
+        """
+        while True:
+            try:
+                message = yield self.received_msg_queue.get()
+                yield self._process_message(message)
+            except Exception as e:
+                log.exception("Failed-dequeueing-received-message", e=e)
+
+    def _to_string(self, unicode_str):
+        if unicode_str is not None:
+            if type(unicode_str) == unicode:
+                return unicode_str.encode('ascii', 'ignore')
+            else:
+                return unicode_str
+        else:
+            return None
+
+    def _format_request(self,
+                        rpc,
+                        to_topic,
+                        reply_topic,
+                        **kwargs):
+        """
+        Format a request to send over kafka
+        :param rpc: Requested remote API
+        :param to_topic: Topic to send the request
+        :param reply_topic: Topic to receive the resulting response, if any
+        :param kwargs: Dictionary of key-value pairs to pass as arguments to
+        the remote rpc API.
+        :return: A tuple of (InterContainerMessage, transaction_id,
+        response_required) on success, or (None, None, None) on failure
+        """
+        try:
+            transaction_id = uuid4().hex
+            request = InterContainerMessage()
+            request_body = InterContainerRequestBody()
+            request.header.id = transaction_id
+            request.header.type = MessageType.Value("REQUEST")
+            request.header.from_topic = self.default_topic
+            request.header.to_topic = to_topic
+
+            response_required = False
+            if reply_topic:
+                request_body.reply_to_topic = reply_topic
+                response_required = True
+
+            request.header.timestamp = int(round(time.time() * 1000))
+            request_body.rpc = rpc
+            for a, b in kwargs.iteritems():
+                arg = Argument()
+                arg.key = a
+                try:
+                    arg.value.Pack(b)
+                    request_body.args.extend([arg])
+                except Exception as e:
+                    log.exception("Failed-parsing-value", e=e)
+            request_body.reply_to_topic = self.default_topic
+            request_body.response_required = response_required
+            request.body.Pack(request_body)
+            return request, transaction_id, response_required
+        except Exception as e:
+            log.exception("formatting-request-failed",
+                          rpc=rpc,
+                          to_topic=to_topic,
+                          reply_topic=reply_topic,
+                          args=kwargs)
+            return None, None, None
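+
+    # Illustrative only: arg.value.Pack(b) above is protobuf Any.Pack(), so
+    # every kwarg value passed to _format_request() must itself be a protobuf
+    # Message; plain strings or ints would fail the Pack() call and the
+    # argument would be skipped.  A hedged sketch, assuming a hypothetical
+    # DeviceId protobuf message:
+    #
+    #   request, tx_id, resp_required = self._format_request(
+    #       rpc="get_device",
+    #       to_topic="rwcore",
+    #       reply_topic="ponsim_olt",
+    #       device_id=DeviceId(id="0001"))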
+
+    def _format_response(self, msg_header, msg_body, status):
+        """
+        Format a response
+        :param msg_header: The header portion of a received request
+        :param msg_body: The response body
+        :param status: True if this represents a successful response
+        :return: an InterContainerMessage message, or None on failure
+        """
+        try:
+            assert isinstance(msg_header, Header)
+            response = InterContainerMessage()
+            response_body = InterContainerResponseBody()
+            response.header.id = msg_header.id
+            response.header.timestamp = int(
+                round(time.time() * 1000))
+            response.header.type = MessageType.Value("RESPONSE")
+            response.header.from_topic = msg_header.to_topic
+            response.header.to_topic = msg_header.from_topic
+            if msg_body is not None:
+                response_body.result.Pack(msg_body)
+            response_body.success = status
+            response.body.Pack(response_body)
+            return response
+        except Exception as e:
+            log.exception("formatting-response-failed", header=msg_header,
+                          body=msg_body, status=status, e=e)
+            return None
+
+    def _parse_response(self, msg):
+        try:
+            message = InterContainerMessage()
+            message.ParseFromString(msg)
+            resp = InterContainerResponseBody()
+            if message.body.Is(InterContainerResponseBody.DESCRIPTOR):
+                message.body.Unpack(resp)
+            else:
+                log.debug("unsupported-msg", msg_type=type(message.body))
+                return None
+            log.debug("parsed-response", input=message, output=resp)
+            return resp
+        except Exception as e:
+            log.exception("parsing-response-failed", msg=msg, e=e)
+            return None
+
+    @inlineCallbacks
+    def _process_message(self, m):
+        """
+        Default internal method invoked for each message received from
+        Kafka.
+        """
+        def _toDict(args):
+            """
+            Convert a repeated Argument field into a python dictionary
+            :param args: Repeated core_adapter.Argument field
+            :return: a python dictionary
+            """
+            if args is None:
+                return None
+            result = {}
+            for arg in args:
+                assert isinstance(arg, Argument)
+                result[arg.key] = arg.value
+            return result
+
+        current_time = int(round(time.time() * 1000))
+        # log.debug("Got Message", message=m)
+        try:
+            val = m.message.value
+            # print m.topic
+
+            # Go over customized callbacks first
+            if m.topic in self.topic_callback_map:
+                for c in self.topic_callback_map[m.topic]:
+                    yield c(val)
+
+            #  Check whether we need to process request/response scenario
+            if m.topic not in self.topic_target_cls_map:
+                return
+
+            # Process request/response scenario
+            message = InterContainerMessage()
+            message.ParseFromString(val)
+
+            if message.header.type == MessageType.Value("REQUEST"):
+                # if self.num_messages == 0:
+                #     self.init_time = int(round(time.time() * 1000))
+                #     self.init_received_time = message.header.timestamp
+                #     log.debug("INIT_TIME", time=self.init_time,
+                #               time_sent=message.header.timestamp)
+                # self.num_messages = self.num_messages + 1
+                #
+                # self.total_time = self.total_time + current_time - message.header.timestamp
+                #
+                # if self.num_messages % 10 == 0:
+                #     log.debug("TOTAL_TIME ...",
+                #               num=self.num_messages,
+                #               total=self.total_time,
+                #               duration=current_time - self.init_time,
+                #               time_since_first_msg=current_time - self.init_received_time,
+                #               average=self.total_time / 10)
+                #     self.total_time = 0
+
+                # Get the target class for that specific topic
+                targetted_topic = self._to_string(message.header.to_topic)
+                msg_body = InterContainerRequestBody()
+                if message.body.Is(InterContainerRequestBody.DESCRIPTOR):
+                    message.body.Unpack(msg_body)
+                else:
+                    log.debug("unsupported-msg", msg_type=type(message.body))
+                    return
+                if targetted_topic in self.topic_target_cls_map:
+                    if msg_body.args:
+                        log.debug("message-body-args-present", body=msg_body)
+                        (status, res) = yield getattr(
+                            self.topic_target_cls_map[targetted_topic],
+                            self._to_string(msg_body.rpc))(
+                            **_toDict(msg_body.args))
+                    else:
+                        log.debug("message-body-args-absent", body=msg_body,
+                                  rpc=msg_body.rpc)
+                        (status, res) = yield getattr(
+                            self.topic_target_cls_map[targetted_topic],
+                            self._to_string(msg_body.rpc))()
+                    if msg_body.response_required:
+                        response = self._format_response(
+                            msg_header=message.header,
+                            msg_body=res,
+                            status=status,
+                        )
+                        if response is not None:
+                            res_topic = self._to_string(
+                                response.header.to_topic)
+                            self._send_kafka_message(res_topic, response)
+                            log.debug("Response-sent", response=response.body)
+            elif message.header.type == MessageType.Value("RESPONSE"):
+                trns_id = self._to_string(message.header.id)
+                if trns_id in self.transaction_id_deferred_map:
+                    # self.num_responses = self.num_responses + 1
+                    # self.total_time_responses = self.total_time_responses + current_time - \
+                    #                             message.header.timestamp
+                    # if self.num_responses % 10 == 0:
+                    #     log.debug("TOTAL RESPONSES ...",
+                    #               num=self.num_responses,
+                    #               total=self.total_time_responses,
+                    #               average=self.total_time_responses / 10)
+                    #     self.total_time_responses = 0
+
+                    resp = self._parse_response(val)
+
+                    self.transaction_id_deferred_map[trns_id].callback(resp)
+            else:
+                log.error("!!INVALID-TRANSACTION-TYPE!!")
+
+        except Exception as e:
+            log.exception("Failed-to-process-message", message=m, e=e)
+
+    @inlineCallbacks
+    def _send_kafka_message(self, topic, msg):
+        try:
+            yield self.kafka_proxy.send_message(topic, msg.SerializeToString())
+        except Exception, e:
+            log.exception("Failed-sending-message", message=msg, e=e)
+
+    @inlineCallbacks
+    def send_request(self,
+                     rpc,
+                     to_topic,
+                     reply_topic=None,
+                     callback=None,
+                     **kwargs):
+        """
+        Invoked to send a message to a remote container and receive a
+        response if required.
+        :param rpc: The remote API to invoke
+        :param to_topic: Send the message to this kafka topic
+        :param reply_topic: If not None then a response is expected on this
+        topic.  If set to None then no response is required.
+        :param callback: Callback to invoke when a response is received.
+        :param kwargs: Key-value pairs representing arguments to pass to the
+        rpc remote API.
+        :return: None if no response is required; otherwise the response is
+        either passed to the callback or returned as a tuple of
+        (status, return_cls)
+        """
+        try:
+            # Ensure all strings are not unicode encoded
+            rpc = self._to_string(rpc)
+            to_topic = self._to_string(to_topic)
+            reply_topic = self._to_string(reply_topic)
+
+            request, transaction_id, response_required = \
+                self._format_request(
+                    rpc=rpc,
+                    to_topic=to_topic,
+                    reply_topic=reply_topic,
+                    **kwargs)
+
+            if request is None:
+                return
+
+            # Add the transaction to the transaction map before sending the
+            # request.  This will guarantee the eventual response will be
+            # processed.
+            wait_for_result = None
+            if response_required:
+                wait_for_result = Deferred()
+                self.transaction_id_deferred_map[
+                    self._to_string(request.header.id)] = wait_for_result
+
+            log.debug("BEFORE-SENDING", to_topic=to_topic, from_topic=reply_topic)
+            yield self._send_kafka_message(to_topic, request)
+            log.debug("AFTER-SENDING", to_topic=to_topic, from_topic=reply_topic)
+
+            if response_required:
+                res = yield wait_for_result
+
+                if res is None or not res.success:
+                    raise KafkaMessagingError(
+                        error="Failed-response:{}".format(res))
+
+                # Remove the transaction from the transaction map
+                del self.transaction_id_deferred_map[transaction_id]
+
+                log.debug("send-message-response", rpc=rpc, result=res)
+
+                if callback:
+                    callback((res.success, res.result))
+                else:
+                    returnValue((res.success, res.result))
+        except Exception as e:
+            log.exception("Exception-sending-request", e=e)
+            raise KafkaMessagingError(error=e)
+
+
+# Common method to get the singleton instance of the kafka proxy class
+def get_messaging_proxy():
+    return IKafkaMessagingProxy._kafka_messaging_instance
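+
+
+# Illustrative usage only (not part of this module): a hedged sketch of a
+# request/response exchange with the Core over kafka, assuming the proxy has
+# been started and that "get_device" is an rpc exposed by the Core's request
+# handler (names and arguments here are examples, not guaranteed APIs).
+#
+#   proxy = get_messaging_proxy()
+#   (success, result) = yield proxy.send_request(
+#       rpc="get_device",
+#       to_topic="rwcore",
+#       reply_topic=proxy.default_topic,
+#       device_id=device_id_proto)
+#   if success:
+#       log.debug("got-device", device=result)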
diff --git a/adapters/kafka/kafka_proxy.py b/adapters/kafka/kafka_proxy.py
new file mode 100644
index 0000000..10fdbf8
--- /dev/null
+++ b/adapters/kafka/kafka_proxy.py
@@ -0,0 +1,209 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from afkak.client import KafkaClient as _KafkaClient
+from afkak.common import (
+    PRODUCER_ACK_LOCAL_WRITE, PRODUCER_ACK_NOT_REQUIRED
+)
+from afkak.producer import Producer as _kafkaProducer
+from structlog import get_logger
+from twisted.internet.defer import inlineCallbacks, returnValue
+from zope.interface import implementer
+
+from adapters.common.utils.consulhelpers import get_endpoint_from_consul
+from adapters.kafka.event_bus_publisher import EventBusPublisher
+from adapters.common.utils.registry import IComponent
+import time
+
+log = get_logger()
+
+
+@implementer(IComponent)
+class KafkaProxy(object):
+    """
+    This is a singleton proxy kafka class to hide the kafka client details.
+    """
+    _kafka_instance = None
+
+    def __init__(self,
+                 consul_endpoint='localhost:8500',
+                 kafka_endpoint='localhost:9092',
+                 ack_timeout=1000,
+                 max_req_attempts=10,
+                 config={}):
+
+        # raise an exception if the object already exists
+        if KafkaProxy._kafka_instance:
+            raise Exception('Singleton exists for: {}'.format(KafkaProxy))
+
+        log.debug('initializing', endpoint=kafka_endpoint)
+        self.ack_timeout = ack_timeout
+        self.max_req_attempts = max_req_attempts
+        self.consul_endpoint = consul_endpoint
+        self.kafka_endpoint = kafka_endpoint
+        self.config = config
+        self.kclient = None
+        self.kproducer = None
+        self.event_bus_publisher = None
+        self.stopping = False
+        self.faulty = False
+        log.debug('initialized', endpoint=kafka_endpoint)
+
+    @inlineCallbacks
+    def start(self):
+        log.debug('starting')
+        self._get_kafka_producer()
+        KafkaProxy._kafka_instance = self
+        self.event_bus_publisher = yield EventBusPublisher(
+            self, self.config.get('event_bus_publisher', {})).start()
+        log.info('started')
+        self.faulty = False
+        self.stopping = False
+        returnValue(self)
+
+    @inlineCallbacks
+    def stop(self):
+        try:
+            log.debug('stopping-kafka-proxy')
+            try:
+                if self.kclient:
+                    yield self.kclient.close()
+                    self.kclient = None
+                    log.debug('stopped-kclient-kafka-proxy')
+            except Exception, e:
+                log.exception('failed-stopped-kclient-kafka-proxy', e=e)
+                pass
+
+            try:
+                if self.kproducer:
+                    yield self.kproducer.stop()
+                    self.kproducer = None
+                    log.debug('stopped-kproducer-kafka-proxy')
+            except Exception, e:
+                log.exception('failed-stopped-kproducer-kafka-proxy', e=e)
+                pass
+
+            #try:
+            #    if self.event_bus_publisher:
+            #        yield self.event_bus_publisher.stop()
+            #        self.event_bus_publisher = None
+            #        log.debug('stopped-event-bus-publisher-kafka-proxy')
+            #except Exception, e:
+            #    log.debug('failed-stopped-event-bus-publisher-kafka-proxy')
+            #    pass
+
+            log.debug('stopped-kafka-proxy')
+
+        except Exception, e:
+            self.kclient = None
+            self.kproducer = None
+            #self.event_bus_publisher = None
+            log.exception('failed-stopped-kafka-proxy', e=e)
+            pass
+
+    def _get_kafka_producer(self):
+        # PRODUCER_ACK_LOCAL_WRITE : server will wait till the data is written
+        #  to a local log before sending response
+        try:
+
+            if self.kafka_endpoint.startswith('@'):
+                try:
+                    _k_endpoint = get_endpoint_from_consul(self.consul_endpoint,
+                                                           self.kafka_endpoint[1:])
+                    log.debug('found-kafka-service', endpoint=_k_endpoint)
+
+                except Exception as e:
+                    log.exception('no-kafka-service-in-consul', e=e)
+
+                    self.kproducer = None
+                    self.kclient = None
+                    return
+            else:
+                _k_endpoint = self.kafka_endpoint
+
+            self.kclient = _KafkaClient(_k_endpoint)
+            self.kproducer = _kafkaProducer(self.kclient,
+                                            req_acks=PRODUCER_ACK_NOT_REQUIRED,
+                                            # req_acks=PRODUCER_ACK_LOCAL_WRITE,
+                                            # ack_timeout=self.ack_timeout,
+                                            # max_req_attempts=self.max_req_attempts)
+                                            )
+        except Exception, e:
+            log.exception('failed-get-kafka-producer', e=e)
+            return
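+
+    # Illustrative only: the '@' prefix handled above means "resolve the
+    # kafka endpoint from consul by service name" instead of using a literal
+    # host:port.  For example (hypothetical values):
+    #
+    #   KafkaProxy(consul_endpoint='consul:8500', kafka_endpoint='@kafka')
+    #       -> looks up the 'kafka' service in consul
+    #   KafkaProxy(kafka_endpoint='192.168.0.20:9092')
+    #       -> uses the given address directly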
+
+    @inlineCallbacks
+    def send_message(self, topic, msg):
+        assert topic is not None
+        assert msg is not None
+
+        # first check whether we have a kafka producer.  If there is none
+        # then try to get one - this happens only when we try to lookup the
+        # kafka service from consul
+        try:
+            if self.faulty is False:
+
+                if self.kproducer is None:
+                    self._get_kafka_producer()
+                    # Let the next message request retry if this still fails
+                    if self.kproducer is None:
+                        log.error('no-kafka-producer', endpoint=self.kafka_endpoint)
+                        return
+
+                # log.debug('sending-kafka-msg', topic=topic, msg=msg)
+                msgs = [msg]
+
+                if self.kproducer and self.kclient and \
+                        self.event_bus_publisher and self.faulty is False:
+                    # log.debug('sending-kafka-msg-I-am-here0', time=int(round(time.time() * 1000)))
+
+                    yield self.kproducer.send_messages(topic, msgs=msgs)
+                    # self.kproducer.send_messages(topic, msgs=msgs)
+                    # log.debug('sent-kafka-msg', topic=topic, msg=msg)
+                else:
+                    return
+
+        except Exception, e:
+            self.faulty = True
+            log.error('failed-to-send-kafka-msg', topic=topic, msg=msg, e=e)
+
+            # set the kafka producer to None.  This is needed if the
+            # kafka docker went down and comes back up with a different
+            # port number.
+            if self.stopping is False:
+                log.debug('stopping-kafka-proxy')
+                try:
+                    self.stopping = True
+                    self.stop()
+                    self.stopping = False
+                    self.faulty = False
+                    log.debug('stopped-kafka-proxy')
+                except Exception, e:
+                    log.exception('failed-stopping-kafka-proxy', e=e)
+                    pass
+            else:
+                log.info('already-stopping-kafka-proxy')
+
+            return
+
+    def is_faulty(self):
+        return self.faulty
+
+
+# Common method to get the singleton instance of the kafka proxy class
+def get_kafka_proxy():
+    return KafkaProxy._kafka_instance
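+
+
+# Illustrative usage only (not part of this module): a minimal sketch,
+# assuming the proxy was created and started elsewhere (e.g. in an adapter's
+# startup code) and that `payload` is an already serialized string such as
+# simplejson.dumps() of a dict.
+#
+#   kafka_proxy = get_kafka_proxy()
+#   if kafka_proxy and not kafka_proxy.is_faulty():
+#       yield kafka_proxy.send_message('adapters.heartbeat', payload)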
+
diff --git a/adapters/ponsim_olt/VERSION b/adapters/ponsim_olt/VERSION
new file mode 100644
index 0000000..c0ab82c
--- /dev/null
+++ b/adapters/ponsim_olt/VERSION
@@ -0,0 +1 @@
+0.0.1-dev
diff --git a/adapters/ponsim_olt/__init__.py b/adapters/ponsim_olt/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/ponsim_olt/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/ponsim_olt/main.py b/adapters/ponsim_olt/main.py
new file mode 100755
index 0000000..53745ee
--- /dev/null
+++ b/adapters/ponsim_olt/main.py
@@ -0,0 +1,487 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Ponsim OLT Adapter main entry point"""
+
+import argparse
+import arrow
+import os
+import time
+
+import yaml
+from simplejson import dumps
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.task import LoopingCall
+from zope.interface import implementer
+from adapters.protos import third_party
+from adapters.common.structlog_setup import setup_logging, update_logging
+from adapters.common.utils.dockerhelpers import get_my_containers_name
+from adapters.common.utils.nethelpers import get_my_primary_local_ipv4, \
+    get_my_primary_interface
+from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from adapters.common.utils.registry import registry, IComponent
+from packaging.version import Version
+from adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, get_messaging_proxy
+from adapters.ponsim_olt.ponsim_olt import PonSimOltAdapter
+from adapters.protos.adapter_pb2 import AdapterConfig, Adapter
+from adapters.kafka.adapter_request_facade import AdapterRequestFacade
+from adapters.kafka.core_proxy import CoreProxy
+from adapters.common.utils.deferred_utils import TimeOutError
+from adapters.common.utils.asleep import asleep
+
+_ = third_party
+
+defs = dict(
+    version_file='./VERSION',
+    config=os.environ.get('CONFIG', './ponsim_olt.yml'),
+    container_name_regex=os.environ.get('CONTAINER_NUMBER_EXTRACTOR', '^.*\.(['
+                                                                      '0-9]+)\..*$'),
+    consul=os.environ.get('CONSUL', 'localhost:8500'),
+    name=os.environ.get('NAME', 'ponsim_olt'),
+    vendor=os.environ.get('VENDOR', 'Voltha Project'),
+    device_type=os.environ.get('DEVICE_TYPE', 'ponsim_olt'),
+    accept_bulk_flow=os.environ.get('ACCEPT_BULK_FLOW', True),
+    accept_atomic_flow=os.environ.get('ACCEPT_ATOMIC_FLOW', True),
+    etcd=os.environ.get('ETCD', 'localhost:2379'),
+    core_topic=os.environ.get('CORE_TOPIC', 'rwcore'),
+    interface=os.environ.get('INTERFACE', get_my_primary_interface()),
+    instance_id=os.environ.get('INSTANCE_ID', os.environ.get('HOSTNAME', '1')),
+    kafka_adapter=os.environ.get('KAFKA_ADAPTER', '192.168.0.20:9092'),
+    kafka_cluster=os.environ.get('KAFKA_CLUSTER', '10.100.198.220:9092'),
+    backend=os.environ.get('BACKEND', 'none'),
+    retry_interval=os.environ.get('RETRY_INTERVAL', 2),
+    heartbeat_topic=os.environ.get('HEARTBEAT_TOPIC', "adapters.heartbeat"),
+)
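+
+# Illustrative only: any of the defaults above can be overridden through the
+# corresponding environment variable before the adapter starts, e.g.
+# (hypothetical values):
+#
+#   KAFKA_ADAPTER=kafka:9092 CORE_TOPIC=rwcore CONSUL=consul:8500 \
+#       python adapters/ponsim_olt/main.py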
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    _help = ('Path to ponsim_olt.yml config file (default: %s). '
+             'If relative, it is relative to main.py of the ponsim_olt '
+             'adapter.'
+             % defs['config'])
+    parser.add_argument('-c', '--config',
+                        dest='config',
+                        action='store',
+                        default=defs['config'],
+                        help=_help)
+
+    _help = 'Regular expression for extracting container number from ' \
+            'container name (default: %s)' % defs['container_name_regex']
+    parser.add_argument('-X', '--container-number-extractor',
+                        dest='container_name_regex',
+                        action='store',
+                        default=defs['container_name_regex'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to consul agent (default: %s)' % defs['consul']
+    parser.add_argument('-C', '--consul',
+                        dest='consul',
+                        action='store',
+                        default=defs['consul'],
+                        help=_help)
+
+    _help = 'name of this adapter (default: %s)' % defs['name']
+    parser.add_argument('-na', '--name',
+                        dest='name',
+                        action='store',
+                        default=defs['name'],
+                        help=_help)
+
+    _help = 'vendor of this adapter (default: %s)' % defs['vendor']
+    parser.add_argument('-ven', '--vendor',
+                        dest='vendor',
+                        action='store',
+                        default=defs['vendor'],
+                        help=_help)
+
+    _help = 'supported device type of this adapter (default: %s)' % defs[
+        'device_type']
+    parser.add_argument('-dt', '--device_type',
+                        dest='device_type',
+                        action='store',
+                        default=defs['device_type'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts bulk flow updates ' \
+            '(default: %s)' % defs['accept_bulk_flow']
+    parser.add_argument('-abf', '--accept_bulk_flow',
+                        dest='accept_bulk_flow',
+                        action='store',
+                        default=defs['accept_bulk_flow'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts add/remove flow ' \
+            'updates (default: %s)' % defs['accept_atomic_flow']
+    parser.add_argument('-aaf', '--accept_atomic_flow',
+                        dest='accept_atomic_flow',
+                        action='store',
+                        default=defs['accept_atomic_flow'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to etcd server (default: %s)' % defs['etcd']
+    parser.add_argument('-e', '--etcd',
+                        dest='etcd',
+                        action='store',
+                        default=defs['etcd'],
+                        help=_help)
+
+    _help = ('unique string id of this container instance (default: %s)'
+             % defs['instance_id'])
+    parser.add_argument('-i', '--instance-id',
+                        dest='instance_id',
+                        action='store',
+                        default=defs['instance_id'],
+                        help=_help)
+
+    _help = 'ETH interface to receive on (default: %s)' % defs['interface']
+    parser.add_argument('-I', '--interface',
+                        dest='interface',
+                        action='store',
+                        default=defs['interface'],
+                        help=_help)
+
+    _help = 'omit startup banner log lines'
+    parser.add_argument('-n', '--no-banner',
+                        dest='no_banner',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = 'do not emit periodic heartbeat log messages'
+    parser.add_argument('-N', '--no-heartbeat',
+                        dest='no_heartbeat',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = "suppress debug and info logs"
+    parser.add_argument('-q', '--quiet',
+                        dest='quiet',
+                        action='count',
+                        help=_help)
+
+    _help = 'enable verbose logging'
+    parser.add_argument('-v', '--verbose',
+                        dest='verbose',
+                        action='count',
+                        help=_help)
+
+    _help = ('use docker container name as container instance id'
+             ' (overrides -i/--instance-id option)')
+    parser.add_argument('--instance-id-is-container-name',
+                        dest='instance_id_is_container_name',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka adapter broker (default: %s). '
+             'If not specified (None), the address from the config file is '
+             'used'
+             % defs['kafka_adapter'])
+    parser.add_argument('-KA', '--kafka_adapter',
+                        dest='kafka_adapter',
+                        action='store',
+                        default=defs['kafka_adapter'],
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka cluster broker (default: %s). '
+             'If not specified (None), the address from the config file is '
+             'used'
+             % defs['kafka_cluster'])
+    parser.add_argument('-KC', '--kafka_cluster',
+                        dest='kafka_cluster',
+                        action='store',
+                        default=defs['kafka_cluster'],
+                        help=_help)
+
+    _help = 'backend to use for config persistence'
+    parser.add_argument('-b', '--backend',
+                        default=defs['backend'],
+                        choices=['none', 'consul', 'etcd'],
+                        help=_help)
+
+    _help = 'topic of core on the kafka bus'
+    parser.add_argument('-ct', '--core_topic',
+                        dest='core_topic',
+                        action='store',
+                        default=defs['core_topic'],
+                        help=_help)
+
+    args = parser.parse_args()
+
+    # post-processing
+
+    if args.instance_id_is_container_name:
+        args.instance_id = get_my_containers_name()
+
+    return args
+
+
+def load_config(args):
+    path = args.config
+    if path.startswith('.'):
+        dir = os.path.dirname(os.path.abspath(__file__))
+        path = os.path.join(dir, path)
+    path = os.path.abspath(path)
+    with open(path) as fd:
+        config = yaml.load(fd)
+    return config
+
+
+def print_banner(log):
+    log.info(' ____                 _              ___  _   _____   ')
+    log.info('|  _ \ ___  _ __  ___(_)_ __ ___    / _ \| | |_   _|  ')
+    log.info('| |_) / _ \| \'_ \/ __| | \'_ ` _ \  | | | | |   | |  ')
+    log.info('|  __/ (_) | | | \__ \ | | | | | | | |_| | |___| |    ')
+    log.info('|_|   \___/|_| |_|___/_|_| |_| |_|  \___/|_____|_|    ')
+    log.info('                                                      ')
+    log.info('   _       _             _                            ')
+    log.info('  / \   __| | __ _ _ __ | |_ ___ _ __                 ')
+    log.info('  / _ \ / _` |/ _` | \'_ \| __/ _ \ \'__|             ')
+    log.info(' / ___ \ (_| | (_| | |_) | ||  __/ |                  ')
+    log.info('/_/   \_\__,_|\__,_| .__/ \__\___|_|                  ')
+    log.info('                   |_|                                ')
+    log.info('(to stop: press Ctrl-C)')
+
+
+@implementer(IComponent)
+class Main(object):
+
+    def __init__(self):
+
+        self.args = args = parse_args()
+        self.config = load_config(args)
+
+        verbosity_adjust = (args.verbose or 0) - (args.quiet or 0)
+        self.log = setup_logging(self.config.get('logging', {}),
+                                 args.instance_id,
+                                 verbosity_adjust=verbosity_adjust)
+        self.log.info('container-number-extractor',
+                      regex=args.container_name_regex)
+
+        self.ponsim_olt_adapter_version = self.get_version()
+        self.log.info('Ponsim-OLT-Adapter-Version',
+                      version=self.ponsim_olt_adapter_version)
+
+        if not args.no_banner:
+            print_banner(self.log)
+
+        # Create a unique instance id using the passed-in instance id and
+        # UTC timestamp
+        current_time = arrow.utcnow().timestamp
+        self.instance_id = self.args.instance_id + '_' + str(current_time)
+
+        self.core_topic = args.core_topic
+        self.listening_topic = args.name
+        self.startup_components()
+
+        if not args.no_heartbeat:
+            self.start_heartbeat()
+            self.start_kafka_cluster_heartbeat(self.instance_id)
+
+    def get_version(self):
+        path = defs['version_file']
+        if not path.startswith('/'):
+            dir = os.path.dirname(os.path.abspath(__file__))
+            path = os.path.join(dir, path)
+
+        path = os.path.abspath(path)
+        version_file = open(path, 'r')
+        v = version_file.read()
+
+        # Use Version to validate the version string - exception will be raised
+        # if the version is invalid
+        Version(v)
+
+        version_file.close()
+        return v
+
+    def start(self):
+        self.start_reactor()  # will not return except Keyboard interrupt
+
+    def stop(self):
+        pass
+
+    def get_args(self):
+        """Allow access to command line args"""
+        return self.args
+
+    def get_config(self):
+        """Allow access to content of config file"""
+        return self.config
+
+    def _get_adapter_config(self):
+        cfg = AdapterConfig()
+        return cfg
+
+    @inlineCallbacks
+    def startup_components(self):
+        try:
+            self.log.info('starting-internal-components',
+                          consul=self.args.consul,
+                          etcd=self.args.etcd)
+
+            registry.register('main', self)
+
+            # Update the logger to output the vcore id.
+            self.log = update_logging(instance_id=self.instance_id,
+                                      vcore_id=None)
+
+            yield registry.register(
+                'kafka_cluster_proxy',
+                KafkaProxy(
+                    self.args.consul,
+                    self.args.kafka_cluster,
+                    config=self.config.get('kafka-cluster-proxy', {})
+                )
+            ).start()
+
+            config = self._get_adapter_config()
+
+            self.core_proxy = CoreProxy(
+                kafka_proxy=None,
+                core_topic=self.core_topic,
+                my_listening_topic=self.listening_topic)
+
+            ponsim_olt_adapter = PonSimOltAdapter(
+                adapter_agent=self.core_proxy, config=config)
+            ponsim_request_handler = AdapterRequestFacade(
+                adapter=ponsim_olt_adapter)
+
+            yield registry.register(
+                'kafka_adapter_proxy',
+                IKafkaMessagingProxy(
+                    kafka_host_port=self.args.kafka_adapter,
+                    # TODO: Add KV Store object reference
+                    kv_store=self.args.backend,
+                    default_topic=self.args.name,
+                    # Needs to assign a real class
+                    target_cls=ponsim_request_handler
+                )
+            ).start()
+
+            self.core_proxy.kafka_proxy = get_messaging_proxy()
+
+            # retry forever
+            res = yield self._register_with_core(-1)
+
+            self.log.info('started-internal-services')
+
+        except Exception as e:
+            self.log.exception('Failure-to-start-all-components', e=e)
+
+    @inlineCallbacks
+    def shutdown_components(self):
+        """Execute before the reactor is shut down"""
+        self.log.info('exiting-on-keyboard-interrupt')
+        for component in reversed(registry.iterate()):
+            yield component.stop()
+
+        import threading
+        self.log.info('THREADS:')
+        main_thread = threading.current_thread()
+        for t in threading.enumerate():
+            if t is main_thread:
+                continue
+            if not t.isDaemon():
+                continue
+            self.log.info('joining thread {} {}'.format(
+                t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
+            t.join()
+
+    def start_reactor(self):
+        from twisted.internet import reactor
+        reactor.callWhenRunning(
+            lambda: self.log.info('twisted-reactor-started'))
+        reactor.addSystemEventTrigger('before', 'shutdown',
+                                      self.shutdown_components)
+        reactor.run()
+
+    @inlineCallbacks
+    def _register_with_core(self, retries):
+        # Send registration to Core with adapter specs
+        adapter = Adapter()
+        adapter.id = self.args.name
+        adapter.vendor = self.args.name
+        adapter.version = self.ponsim_olt_adapter_version
+        while 1:
+            try:
+                resp = yield self.core_proxy.register(adapter)
+                self.log.info('registration-response', response=resp)
+                returnValue(resp)
+            except TimeOutError as e:
+                self.log.warn("timeout-when-registering-with-core", e=e)
+                if retries == 0:
+                    self.log.exception("no-more-retries", e=e)
+                    raise
+                else:
+                    retries = retries if retries < 0 else retries - 1
+                    yield asleep(defs['retry_interval'])
+            except Exception as e:
+                self.log.exception("failed-registration", e=e)
+                raise
+
+    def start_heartbeat(self):
+
+        t0 = time.time()
+        t0s = time.ctime(t0)
+
+        def heartbeat():
+            self.log.debug(status='up', since=t0s, uptime=time.time() - t0)
+
+        lc = LoopingCall(heartbeat)
+        lc.start(10)
+
+    # Temporary function to send a heartbeat message to the external kafka
+    # broker
+    def start_kafka_cluster_heartbeat(self, instance_id):
+        # For heartbeat we send a message to the heartbeat topic (default:
+        # 'adapters.heartbeat').  The message is a JSON-encoded dict.
+        message = dict(
+            type='heartbeat',
+            adapter=self.args.name,
+            instance=instance_id,
+            ip=get_my_primary_local_ipv4()
+        )
+        topic = defs['heartbeat_topic']
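+
+        # Illustrative only: with the defaults above, each heartbeat published
+        # to 'adapters.heartbeat' is a JSON object roughly of the form below
+        # (values shown are examples, not actual output):
+        #   {"type": "heartbeat", "adapter": "ponsim_olt",
+        #    "instance": "1_1530000000", "ip": "10.0.2.15",
+        #    "ts": 1530000000, "uptime": 42.7}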
+
+        def send_msg(start_time):
+            try:
+                kafka_cluster_proxy = get_kafka_proxy()
+                if kafka_cluster_proxy and not kafka_cluster_proxy.is_faulty():
+                    # self.log.debug('kafka-proxy-available')
+                    message['ts'] = arrow.utcnow().timestamp
+                    message['uptime'] = time.time() - start_time
+                    # self.log.debug('start-kafka-heartbeat')
+                    kafka_cluster_proxy.send_message(topic, dumps(message))
+                else:
+                    self.log.error('kafka-proxy-unavailable')
+            except Exception, e:
+                self.log.exception('failed-sending-message-heartbeat', e=e)
+
+        try:
+            t0 = time.time()
+            lc = LoopingCall(send_msg, t0)
+            lc.start(10)
+        except Exception, e:
+            self.log.exception('failed-kafka-heartbeat', e=e)
+
+
+if __name__ == '__main__':
+    Main().start()
diff --git a/adapters/ponsim_olt/ponsim_olt.py b/adapters/ponsim_olt/ponsim_olt.py
new file mode 100644
index 0000000..5e096b4
--- /dev/null
+++ b/adapters/ponsim_olt/ponsim_olt.py
@@ -0,0 +1,755 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Fully simulated OLT adapter.
+"""
+from uuid import uuid4
+
+import arrow
+import adapters.common.openflow.utils as fd
+import grpc
+import structlog
+from scapy.layers.l2 import Ether, Dot1Q
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks
+from grpc._channel import _Rendezvous
+
+from adapters.common.frameio.frameio import BpfProgramFilter, hexify
+from adapters.common.utils.asleep import asleep
+from twisted.internet.task import LoopingCall
+from adapters.iadapter import OltAdapter
+from adapters.protos import third_party
+from adapters.protos import openflow_13_pb2 as ofp
+from adapters.protos import ponsim_pb2
+from adapters.protos.common_pb2 import OperStatus, ConnectStatus, AdminState
+from adapters.protos.device_pb2 import Port, PmConfig, PmConfigs
+from adapters.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
+from google.protobuf.empty_pb2 import Empty
+from adapters.protos.logical_device_pb2 import LogicalPort, LogicalDevice
+from adapters.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+    OFPPF_1GB_FD, \
+    OFPC_GROUP_STATS, OFPC_PORT_STATS, OFPC_TABLE_STATS, OFPC_FLOW_STATS, \
+    ofp_switch_features, ofp_desc
+from adapters.protos.openflow_13_pb2 import ofp_port
+from adapters.protos.ponsim_pb2 import FlowTable, PonSimFrame
+from adapters.protos.core_adapter_pb2 import SwitchCapability, PortCapability
+from adapters.common.utils.registry import registry
+
+_ = third_party
+log = structlog.get_logger()
+
+PACKET_IN_VLAN = 4000
+is_inband_frame = BpfProgramFilter('(ether[14:2] & 0xfff) = 0x{:03x}'.format(
+    PACKET_IN_VLAN))
+
+def mac_str_to_tuple(mac):
+    return tuple(int(d, 16) for d in mac.split(':'))
+
+class AdapterPmMetrics:
+    def __init__(self, device):
+        self.pm_names = {'tx_64_pkts', 'tx_65_127_pkts', 'tx_128_255_pkts',
+                         'tx_256_511_pkts', 'tx_512_1023_pkts',
+                         'tx_1024_1518_pkts', 'tx_1519_9k_pkts',
+                         'rx_64_pkts', 'rx_65_127_pkts',
+                         'rx_128_255_pkts', 'rx_256_511_pkts',
+                         'rx_512_1023_pkts', 'rx_1024_1518_pkts',
+                         'rx_1519_9k_pkts'}
+        self.device = device
+        self.id = device.id
+        self.name = 'ponsim_olt'
+        self.default_freq = 150
+        self.grouped = False
+        self.freq_override = False
+        self.pon_metrics_config = dict()
+        self.nni_metrics_config = dict()
+        self.lc = None
+        for m in self.pm_names:
+            self.pon_metrics_config[m] = PmConfig(name=m,
+                                                  type=PmConfig.COUNTER,
+                                                  enabled=True)
+            self.nni_metrics_config[m] = PmConfig(name=m,
+                                                  type=PmConfig.COUNTER,
+                                                  enabled=True)
+
+    def update(self, pm_config):
+        if self.default_freq != pm_config.default_freq:
+            # Update the callback to the new frequency.
+            self.default_freq = pm_config.default_freq
+            self.lc.stop()
+            self.lc.start(interval=self.default_freq / 10)
+        for m in pm_config.metrics:
+            self.pon_metrics_config[m.name].enabled = m.enabled
+            self.nni_metrics_config[m.name].enabled = m.enabled
+
+    def make_proto(self):
+        pm_config = PmConfigs(
+            id=self.id,
+            default_freq=self.default_freq,
+            grouped=False,
+            freq_override=False)
+        for m in sorted(self.pon_metrics_config):
+            pm = self.pon_metrics_config[m]  # Either will do; they're the same
+            pm_config.metrics.extend([PmConfig(name=pm.name,
+                                               type=pm.type,
+                                               enabled=pm.enabled)])
+        return pm_config
+
+    def collect_port_metrics(self, channel):
+        rtrn_port_metrics = dict()
+        stub = ponsim_pb2.PonSimStub(channel)
+        stats = stub.GetStats(Empty())
+        rtrn_port_metrics['pon'] = self.extract_pon_metrics(stats)
+        rtrn_port_metrics['nni'] = self.extract_nni_metrics(stats)
+        return rtrn_port_metrics
+
+    def extract_pon_metrics(self, stats):
+        rtrn_pon_metrics = dict()
+        for m in stats.metrics:
+            if m.port_name == "pon":
+                for p in m.packets:
+                    if self.pon_metrics_config[p.name].enabled:
+                        rtrn_pon_metrics[p.name] = p.value
+                return rtrn_pon_metrics
+
+    def extract_nni_metrics(self, stats):
+        rtrn_pon_metrics = dict()
+        for m in stats.metrics:
+            if m.port_name == "nni":
+                for p in m.packets:
+                    if self.pon_metrics_config[p.name].enabled:
+                        rtrn_pon_metrics[p.name] = p.value
+                return rtrn_pon_metrics
+
+    def start_collector(self, callback):
+        log.info("starting-pm-collection", device_name=self.name,
+                 device_id=self.device.id)
+        prefix = 'voltha.{}.{}'.format(self.name, self.device.id)
+        self.lc = LoopingCall(callback, self.device.id, prefix)
+        self.lc.start(interval=self.default_freq / 10)
+
+    def stop_collector(self):
+        log.info("stopping-pm-collection", device_name=self.name,
+                 device_id=self.device.id)
+        self.lc.stop()
+
+
+class AdapterAlarms:
+    def __init__(self, adapter, device):
+        self.adapter = adapter
+        self.device = device
+        self.lc = None
+
+    def send_alarm(self, context_data, alarm_data):
+        try:
+            current_context = {}
+            for key, value in context_data.__dict__.items():
+                current_context[key] = str(value)
+
+            alarm_event = self.adapter.adapter_agent.create_alarm(
+                resource_id=self.device.id,
+                description="{}.{} - {}".format(self.adapter.name,
+                                                self.device.id,
+                                                alarm_data[
+                                                    'description']) if 'description' in alarm_data else None,
+                type=alarm_data['type'] if 'type' in alarm_data else None,
+                category=alarm_data[
+                    'category'] if 'category' in alarm_data else None,
+                severity=alarm_data[
+                    'severity'] if 'severity' in alarm_data else None,
+                state=alarm_data['state'] if 'state' in alarm_data else None,
+                raised_ts=alarm_data['ts'] if 'ts' in alarm_data else 0,
+                context=current_context
+            )
+
+            self.adapter.adapter_agent.submit_alarm(self.device.id,
+                                                    alarm_event)
+
+        except Exception as e:
+            log.exception('failed-to-send-alarm', e=e)
+
+
+class PonSimOltAdapter(OltAdapter):
+    def __init__(self, adapter_agent, config):
+        super(PonSimOltAdapter, self).__init__(adapter_agent=adapter_agent,
+                                               config=config,
+                                               device_handler_class=PonSimOltHandler,
+                                               name='ponsim_olt',
+                                               vendor='Voltha project',
+                                               version='0.4',
+                                               device_type='ponsim_olt',
+                                               accepts_bulk_flow_update=True,
+                                               accepts_add_remove_flow_updates=False)
+
+    def update_pm_config(self, device, pm_config):
+        log.info("adapter-update-pm-config", device=device,
+                 pm_config=pm_config)
+        handler = self.devices_handlers[device.id]
+        handler.update_pm_config(device, pm_config)
+
+
+
+class PonSimOltHandler(object):
+    def __init__(self, adapter, device_id):
+        self.adapter = adapter
+        self.adapter_agent = adapter.adapter_agent
+        self.device_id = device_id
+        self.log = structlog.get_logger(device_id=device_id)
+        self.channel = None
+        self.io_port = None
+        self.logical_device_id = None
+        self.nni_port = None
+        self.ofp_port_no = None
+        self.interface = registry('main').get_args().interface
+        self.pm_metrics = None
+        self.alarms = None
+        self.frames = None
+
+    @inlineCallbacks
+    def get_channel(self):
+        if self.channel is None:
+            try:
+                device = yield self.adapter_agent.get_device(self.device_id)
+                self.log.info('device-info', device=device, host_port=device.host_and_port)
+                self.channel = grpc.insecure_channel(device.host_and_port)
+            except Exception as e:
+                log.exception("ponsim-connection-failure", e=e)
+
+        # returnValue(self.channel)
+
+    def close_channel(self):
+        if self.channel is None:
+            self.log.info('grpc-channel-already-closed')
+            return
+        else:
+            if self.frames is not None:
+                self.frames.cancel()
+                self.frames = None
+                self.log.info('cancelled-grpc-frame-stream')
+
+            self.channel.unsubscribe(lambda *args: None)
+            self.channel = None
+
+            self.log.info('grpc-channel-closed')
+
+    def _get_nni_port(self):
+        ports = self.adapter_agent.get_ports(self.device_id, Port.ETHERNET_NNI)
+        if ports:
+            # For now, we use only one NNI port
+            return ports[0]
+
+    @inlineCallbacks
+    def activate(self, device):
+        try:
+            self.log.info('activating')
+
+            if not device.host_and_port:
+                device.oper_status = OperStatus.FAILED
+                device.reason = 'No host_and_port field provided'
+                self.adapter_agent.device_update(device)
+                return
+
+            yield self.get_channel()
+            stub = ponsim_pb2.PonSimStub(self.channel)
+            info = stub.GetDeviceInfo(Empty())
+            log.info('got-info', info=info, device_id=device.id)
+            self.ofp_port_no = info.nni_port
+
+            device.root = True
+            device.vendor = 'ponsim'
+            device.model = 'n/a'
+            device.serial_number = device.host_and_port
+            device.connect_status = ConnectStatus.REACHABLE
+            yield self.adapter_agent.device_update(device)
+
+            # Now set the initial PM configuration for this device
+            self.pm_metrics = AdapterPmMetrics(device)
+            pm_config = self.pm_metrics.make_proto()
+            log.info("initial-pm-config", pm_config=pm_config)
+            self.adapter_agent.device_pm_config_update(pm_config, init=True)
+
+            # Setup alarm handler
+            self.alarms = AdapterAlarms(self.adapter, device)
+
+            nni_port = Port(
+                port_no=info.nni_port,
+                label='NNI facing Ethernet port',
+                type=Port.ETHERNET_NNI,
+                admin_state=AdminState.ENABLED,
+                oper_status=OperStatus.ACTIVE
+            )
+            self.nni_port = nni_port
+            yield self.adapter_agent.port_created(device.id, nni_port)
+            yield self.adapter_agent.port_created(device.id, Port(
+                port_no=1,
+                label='PON port',
+                type=Port.PON_OLT,
+                admin_state=AdminState.ENABLED,
+                oper_status=OperStatus.ACTIVE
+            ))
+            yield self.adapter_agent.device_state_update(device.id, oper_status=OperStatus.ACTIVE)
+
+            # register ONUS
+            self.log.info('onu-found', onus=info.onus, len=len(info.onus))
+            for onu in info.onus:
+                vlan_id = onu.uni_port
+                yield self.adapter_agent.child_device_detected(
+                    parent_device_id=device.id,
+                    parent_port_no=1,
+                    child_device_type='ponsim_onu',
+                    channel_id=vlan_id,
+                )
+
+            self.log.info('starting-frame-grpc-stream')
+            reactor.callInThread(self.rcv_grpc)
+            self.log.info('started-frame-grpc-stream')
+
+            # TODO
+            # Start collecting stats from the device after a brief pause
+            # self.start_kpi_collection(device.id)
+        except Exception as e:
+            log.exception("Exception-activating", e=e)
+
+
+    def get_ofp_device_info(self, device):
+        return SwitchCapability(
+            desc=ofp_desc(
+                hw_desc='ponsim pon',
+                sw_desc='ponsim pon',
+                serial_num=device.serial_number,
+                dp_desc='n/a'
+            ),
+            switch_features=ofp_switch_features(
+                n_buffers=256,  # TODO fake for now
+                n_tables=2,  # TODO ditto
+                capabilities=(  # TODO and ditto
+                    OFPC_FLOW_STATS
+                    | OFPC_TABLE_STATS
+                    | OFPC_PORT_STATS
+                    | OFPC_GROUP_STATS
+                )
+            )
+        )
+
+    def get_ofp_port_info(self, device, port_no):
+        # Since the adapter created the device port, it holds a reference to
+        # the port and can return its capability.
+        # TODO: Do a lookup on the NNI port number and return the appropriate
+        # attributes
+        self.log.info('get_ofp_port_info', port_no=port_no, info=self.ofp_port_no, device_id=device.id)
+        cap = OFPPF_1GB_FD | OFPPF_FIBER
+        return PortCapability(
+            port=LogicalPort(
+                id='nni',
+                ofp_port=ofp_port(
+                    port_no=port_no,
+                    hw_addr=mac_str_to_tuple(
+                        '00:00:00:00:00:%02x' % self.ofp_port_no),
+                    name='nni',
+                    config=0,
+                    state=OFPPS_LIVE,
+                    curr=cap,
+                    advertised=cap,
+                    peer=cap,
+                    curr_speed=OFPPF_1GB_FD,
+                    max_speed=OFPPF_1GB_FD
+                )
+            )
+        )
+
+    def reconcile(self, device):
+        self.log.info('reconciling-OLT-device-starts')
+
+        if not device.host_and_port:
+            device.oper_status = OperStatus.FAILED
+            device.reason = 'No host_and_port field provided'
+            self.adapter_agent.device_update(device)
+            return
+
+        try:
+            stub = ponsim_pb2.PonSimStub(self.get_channel())
+            info = stub.GetDeviceInfo(Empty())
+            log.info('got-info', info=info)
+            # TODO: Verify we are connected to the same device we are
+            # reconciling - not much data in ponsim to differentiate at the
+            # time
+            device.oper_status = OperStatus.ACTIVE
+            self.adapter_agent.device_update(device)
+            self.ofp_port_no = info.nni_port
+            self.nni_port = self._get_nni_port()
+        except Exception as e:
+            log.exception('device-unreachable', e=e)
+            device.connect_status = ConnectStatus.UNREACHABLE
+            device.oper_status = OperStatus.UNKNOWN
+            self.adapter_agent.device_update(device)
+            return
+
+        # Now set the initial PM configuration for this device
+        self.pm_metrics = AdapterPmMetrics(device)
+        pm_config = self.pm_metrics.make_proto()
+        log.info("initial-pm-config", pm_config=pm_config)
+        self.adapter_agent.device_update_pm_config(pm_config, init=True)
+
+        # Setup alarm handler
+        self.alarms = AdapterAlarms(self.adapter, device)
+
+        # TODO: Is there anything required to verify nni and PON ports
+
+        # Set the logical device id
+        device = self.adapter_agent.get_device(device.id)
+        if device.parent_id:
+            self.logical_device_id = device.parent_id
+            self.adapter_agent.reconcile_logical_device(device.parent_id)
+        else:
+            self.log.info('no-logical-device-set')
+
+        # Reconcile child devices
+        self.adapter_agent.reconcile_child_devices(device.id)
+
+        reactor.callInThread(self.rcv_grpc)
+
+        # Start collecting stats from the device after a brief pause
+        self.start_kpi_collection(device.id)
+
+        self.log.info('reconciling-OLT-device-ends')
+
+    @inlineCallbacks
+    def rcv_grpc(self):
+        """
+        This call establishes a GRPC stream to receive frames.
+        """
+        yield self.get_channel()
+        stub = ponsim_pb2.PonSimStub(self.channel)
+        # stub = ponsim_pb2.PonSimStub(self.get_channel())
+
+        # Attempt to establish a grpc stream with the remote ponsim service
+        self.frames = stub.ReceiveFrames(Empty())
+
+        self.log.info('start-receiving-grpc-frames')
+
+        try:
+            for frame in self.frames:
+                self.log.info('received-grpc-frame',
+                              frame_len=len(frame.payload))
+                self._rcv_frame(frame.payload)
+
+        except _Rendezvous as e:
+            log.warn('grpc-connection-lost', message=e.message)
+
+        self.log.info('stopped-receiving-grpc-frames')
+
+
+    # VOLTHA's flow decomposition removes the information about which flows
+    # are trap flows where traffic should be forwarded to the controller.
+    # We'll go through the flows and change the output port of flows that we
+    # know to be trap flows to the OF CONTROLLER port.
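+    # In the loop below, flows matching EAPOL (eth_type 0x888e), IGMP
+    # (ip_proto 2) or UDP (ip_proto 17, e.g. DHCP) are treated as trap flows
+    # and their output action is rewritten to OFPP_CONTROLLER.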
+    def update_flow_table(self, flows):
+        stub = ponsim_pb2.PonSimStub(self.get_channel())
+        self.log.info('pushing-olt-flow-table')
+        for flow in flows:
+            classifier_info = {}
+            for field in fd.get_ofb_fields(flow):
+                if field.type == fd.ETH_TYPE:
+                    classifier_info['eth_type'] = field.eth_type
+                    self.log.debug('field-type-eth-type',
+                                   eth_type=classifier_info['eth_type'])
+                elif field.type == fd.IP_PROTO:
+                    classifier_info['ip_proto'] = field.ip_proto
+                    self.log.debug('field-type-ip-proto',
+                                   ip_proto=classifier_info['ip_proto'])
+            if ('ip_proto' in classifier_info and (
+                    classifier_info['ip_proto'] == 17 or
+                    classifier_info['ip_proto'] == 2)) or (
+                    'eth_type' in classifier_info and
+                    classifier_info['eth_type'] == 0x888e):
+                for action in fd.get_actions(flow):
+                    if action.type == ofp.OFPAT_OUTPUT:
+                        action.output.port = ofp.OFPP_CONTROLLER
+            self.log.info('out_port', out_port=fd.get_out_port(flow))
+
+        stub.UpdateFlowTable(FlowTable(
+            port=0,
+            flows=flows
+        ))
+        self.log.info('success')
+
+    def remove_from_flow_table(self, flows):
+        self.log.debug('remove-from-flow-table', flows=flows)
+        # TODO: Update PONSIM code to accept incremental flow changes
+        # Once completed, the accepts_add_remove_flow_updates for this
+        # device type can be set to True
+
+    def add_to_flow_table(self, flows):
+        self.log.debug('add-to-flow-table', flows=flows)
+        # TODO: Update PONSIM code to accept incremental flow changes
+        # Once completed, the accepts_add_remove_flow_updates for this
+        # device type can be set to True
+
+    def update_pm_config(self, device, pm_config):
+        log.info("handler-update-pm-config", device=device,
+                 pm_config=pm_config)
+        self.pm_metrics.update(pm_config)
+
+    def send_proxied_message(self, proxy_address, msg):
+        self.log.info('sending-proxied-message')
+        if isinstance(msg, FlowTable):
+            stub = ponsim_pb2.PonSimStub(self.get_channel())
+            self.log.info('pushing-onu-flow-table', port=msg.port)
+            res = stub.UpdateFlowTable(msg)
+            self.adapter_agent.receive_proxied_message(proxy_address, res)
+
+    def packet_out(self, egress_port, msg):
+        self.log.info('sending-packet-out', egress_port=egress_port,
+                      msg=hexify(msg))
+        pkt = Ether(msg)
+        out_pkt = pkt
+        if egress_port != self.nni_port.port_no:
+            # don't do the VLAN manipulation for the NNI port; VLANs are already correct
+            out_pkt = (
+                    Ether(src=pkt.src, dst=pkt.dst) /
+                    Dot1Q(vlan=egress_port, type=pkt.type) /
+                    pkt.payload
+            )
+
+        # TODO need better way of mapping logical ports to PON ports
+        out_port = self.nni_port.port_no if egress_port == self.nni_port.port_no else 1
+
+        # send over grpc stream
+        stub = ponsim_pb2.PonSimStub(self.get_channel())
+        frame = PonSimFrame(id=self.device_id, payload=str(out_pkt), out_port=out_port)
+        stub.SendFrame(frame)
+
+
+    @inlineCallbacks
+    def reboot(self):
+        self.log.info('rebooting', device_id=self.device_id)
+
+        # Update the operational status to ACTIVATING and connect status to
+        # UNREACHABLE
+        device = self.adapter_agent.get_device(self.device_id)
+        previous_oper_status = device.oper_status
+        previous_conn_status = device.connect_status
+        device.oper_status = OperStatus.ACTIVATING
+        device.connect_status = ConnectStatus.UNREACHABLE
+        self.adapter_agent.device_update(device)
+
+        # Update the child devices connect state to UNREACHABLE
+        self.adapter_agent.update_child_devices_state(self.device_id,
+                                                      connect_status=ConnectStatus.UNREACHABLE)
+
+        # Sleep 10 secs, simulating a reboot
+        # TODO: send alert and clear alert after the reboot
+        yield asleep(10)
+
+        # Change the operational status back to its previous state.  With a
+        # real OLT the operational state should be the state the device is
+        # after a reboot.
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+        device.oper_status = previous_oper_status
+        device.connect_status = previous_conn_status
+        self.adapter_agent.device_update(device)
+
+        # Update the child devices connect state to REACHABLE
+        self.adapter_agent.update_child_devices_state(self.device_id,
+                                                      connect_status=ConnectStatus.REACHABLE)
+
+        self.log.info('rebooted', device_id=self.device_id)
+
+    def self_test_device(self, device):
+        """
+        This is called to self-test a device based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: The result of the self test
+        """
+        log.info('self-test-device', device=device.id)
+        raise NotImplementedError()
+
+    def disable(self):
+        self.log.info('disabling', device_id=self.device_id)
+
+        self.stop_kpi_collection()
+
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+
+        # Update the operational status to UNKNOWN
+        device.oper_status = OperStatus.UNKNOWN
+        device.connect_status = ConnectStatus.UNREACHABLE
+        self.adapter_agent.device_update(device)
+
+        # Remove the logical device
+        logical_device = self.adapter_agent.get_logical_device(
+            self.logical_device_id)
+        self.adapter_agent.delete_logical_device(logical_device)
+
+        # Disable all child devices first
+        self.adapter_agent.update_child_devices_state(self.device_id,
+                                                      admin_state=AdminState.DISABLED)
+
+        # Remove the peer references from this device
+        self.adapter_agent.delete_all_peer_references(self.device_id)
+
+        # Set all ports to disabled
+        self.adapter_agent.disable_all_ports(self.device_id)
+
+        self.close_channel()
+        self.log.info('disabled-grpc-channel')
+
+        # Update the logical device mapping
+        if self.logical_device_id in \
+                self.adapter.logical_device_id_to_root_device_id:
+            del self.adapter.logical_device_id_to_root_device_id[
+                self.logical_device_id]
+
+        # TODO:
+        # 1) Remove all flows from the device
+        # 2) Remove the device from ponsim
+
+        self.log.info('disabled', device_id=device.id)
+
+    def reenable(self):
+        self.log.info('re-enabling', device_id=self.device_id)
+
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+
+        # Set the ofp_port_no and nni_port in case we bypassed the reconcile
+        # process if the device was in DISABLED state on voltha restart
+        if not self.ofp_port_no and not self.nni_port:
+            stub = ponsim_pb2.PonSimStub(self.get_channel())
+            info = stub.GetDeviceInfo(Empty())
+            log.info('got-info', info=info)
+            self.ofp_port_no = info.nni_port
+            self.nni_port = self._get_nni_port()
+
+        # Update the connect status to REACHABLE
+        device.connect_status = ConnectStatus.REACHABLE
+        self.adapter_agent.device_update(device)
+
+        # Set all ports to enabled
+        self.adapter_agent.enable_all_ports(self.device_id)
+
+        ld = LogicalDevice(
+            # not setting id and datapath_id will let the adapter agent pick the id
+            desc=ofp_desc(
+                hw_desc='simulated pon',
+                sw_desc='simulated pon',
+                serial_num=uuid4().hex,
+                dp_desc='n/a'
+            ),
+            switch_features=ofp_switch_features(
+                n_buffers=256,  # TODO fake for now
+                n_tables=2,  # TODO ditto
+                capabilities=(  # TODO and ditto
+                        OFPC_FLOW_STATS
+                        | OFPC_TABLE_STATS
+                        | OFPC_PORT_STATS
+                        | OFPC_GROUP_STATS
+                )
+            ),
+            root_device_id=device.id
+        )
+        mac_address = "AA:BB:CC:DD:EE:FF"
+        ld_initialized = self.adapter_agent.create_logical_device(ld,
+                                                                  dpid=mac_address)
+        cap = OFPPF_1GB_FD | OFPPF_FIBER
+        self.adapter_agent.add_logical_port(ld_initialized.id, LogicalPort(
+            id='nni',
+            ofp_port=ofp_port(
+                port_no=self.ofp_port_no,
+                hw_addr=mac_str_to_tuple(
+                    '00:00:00:00:00:%02x' % self.ofp_port_no),
+                name='nni',
+                config=0,
+                state=OFPPS_LIVE,
+                curr=cap,
+                advertised=cap,
+                peer=cap,
+                curr_speed=OFPPF_1GB_FD,
+                max_speed=OFPPF_1GB_FD
+            ),
+            device_id=device.id,
+            device_port_no=self.nni_port.port_no,
+            root_port=True
+        ))
+
+        device = self.adapter_agent.get_device(device.id)
+        device.parent_id = ld_initialized.id
+        device.oper_status = OperStatus.ACTIVE
+        self.adapter_agent.device_update(device)
+        self.logical_device_id = ld_initialized.id
+
+        # Reenable all child devices
+        self.adapter_agent.update_child_devices_state(device.id,
+                                                      admin_state=AdminState.ENABLED)
+
+        # establish frame grpc-stream
+        reactor.callInThread(self.rcv_grpc)
+
+        self.start_kpi_collection(device.id)
+
+        self.log.info('re-enabled', device_id=device.id)
+
+    def delete(self):
+        self.log.info('deleting', device_id=self.device_id)
+
+        # Remove all child devices
+        self.adapter_agent.delete_all_child_devices(self.device_id)
+
+        self.close_channel()
+        self.log.info('disabled-grpc-channel')
+
+        # TODO:
+        # 1) Remove all flows from the device
+        # 2) Remove the device from ponsim
+
+        self.log.info('deleted', device_id=self.device_id)
+
+    def start_kpi_collection(self, device_id):
+
+        def _collect(device_id, prefix):
+
+            try:
+                # Step 1: gather metrics from device
+                port_metrics = \
+                    self.pm_metrics.collect_port_metrics(self.get_channel())
+
+                # Step 2: prepare the KpiEvent for submission
+                # we can time-stamp them here (or could use time derived from the OLT)
+                ts = arrow.utcnow().timestamp
+                kpi_event = KpiEvent(
+                    type=KpiEventType.slice,
+                    ts=ts,
+                    prefixes={
+                        # OLT NNI port
+                        prefix + '.nni': MetricValuePairs(
+                            metrics=port_metrics['nni']),
+                        # OLT PON port
+                        prefix + '.pon': MetricValuePairs(
+                            metrics=port_metrics['pon'])
+                    }
+                )
+
+                # Step 3: submit
+                self.adapter_agent.submit_kpis(kpi_event)
+
+            except Exception as e:
+                log.exception('failed-to-submit-kpis', e=e)
+
+        self.pm_metrics.start_collector(_collect)
+
+    def stop_kpi_collection(self):
+        self.pm_metrics.stop_collector()
diff --git a/adapters/ponsim_olt/ponsim_olt.yml b/adapters/ponsim_olt/ponsim_olt.yml
new file mode 100644
index 0000000..fdb647a
--- /dev/null
+++ b/adapters/ponsim_olt/ponsim_olt.yml
@@ -0,0 +1,52 @@
+logging:
+    version: 1
+
+    formatters:
+      brief:
+        format: '%(message)s'
+      default:
+        format: '%(asctime)s.%(msecs)03d %(levelname)-8s %(threadName)s %(module)s.%(funcName)s %(message)s'
+        datefmt: '%Y%m%dT%H%M%S'
+
+    handlers:
+        console:
+            class : logging.StreamHandler
+            level: DEBUG
+            formatter: default
+            stream: ext://sys.stdout
+        localRotatingFile:
+            class: logging.handlers.RotatingFileHandler
+            filename: ponsim_olt.log
+            formatter: default
+            maxBytes: 2097152
+            backupCount: 10
+            level: DEBUG
+        null:
+            class: logging.NullHandler
+
+    loggers:
+        amqp:
+            handlers: [null]
+            propagate: False
+        conf:
+            propagate: False
+        '': # root logger
+            handlers: [console, localRotatingFile]
+            level: DEBUG # this can be bumped up/down by -q and -v command line
+                        # options
+            propagate: False
+
+
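+# Map internal event bus topics to Kafka topics on the cluster message bus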
+kafka-cluster-proxy:
+    event_bus_publisher:
+        topic_mappings:
+            'model-change-events':
+                kafka_topic: 'voltha.events'
+                filters:     [null]
+            'alarms':
+                kafka_topic: 'voltha.alarms'
+                filters:     [null]
+            'kpis':
+                kafka_topic: 'voltha.kpis'
+                filters:     [null]
+
diff --git a/adapters/ponsim_onu/VERSION b/adapters/ponsim_onu/VERSION
new file mode 100644
index 0000000..c0ab82c
--- /dev/null
+++ b/adapters/ponsim_onu/VERSION
@@ -0,0 +1 @@
+0.0.1-dev
diff --git a/adapters/ponsim_onu/__init__.py b/adapters/ponsim_onu/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/ponsim_onu/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/ponsim_onu/main.py b/adapters/ponsim_onu/main.py
new file mode 100755
index 0000000..63e2bc4
--- /dev/null
+++ b/adapters/ponsim_onu/main.py
@@ -0,0 +1,485 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Ponsim ONU Adapter main entry point"""
+
+import argparse
+import arrow
+import os
+import time
+
+import yaml
+from simplejson import dumps
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.task import LoopingCall
+from zope.interface import implementer
+from adapters.protos import third_party
+from adapters.common.structlog_setup import setup_logging, update_logging
+from adapters.common.utils.dockerhelpers import get_my_containers_name
+from adapters.common.utils.nethelpers import get_my_primary_local_ipv4, \
+    get_my_primary_interface
+from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from adapters.common.utils.registry import registry, IComponent
+from packaging.version import Version
+from adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, get_messaging_proxy
+from adapters.ponsim_onu.ponsim_onu import PonSimOnuAdapter
+from adapters.protos.adapter_pb2 import AdapterConfig, Adapter
+from adapters.kafka.adapter_request_facade import AdapterRequestFacade
+from adapters.kafka.core_proxy import CoreProxy
+from adapters.common.utils.deferred_utils import TimeOutError
+from adapters.common.utils.asleep import asleep
+
+_ = third_party
+
+defs = dict(
+    version_file='./VERSION',
+    config=os.environ.get('CONFIG', './ponsim_onu.yml'),
+    container_name_regex=os.environ.get('CONTAINER_NUMBER_EXTRACTOR', '^.*\.(['
+                                                                      '0-9]+)\..*$'),
+    consul=os.environ.get('CONSUL', 'localhost:8500'),
+    name=os.environ.get('NAME', 'ponsim_onu'),
+    vendor=os.environ.get('VENDOR', 'Voltha Project'),
+    device_type=os.environ.get('DEVICE_TYPE', 'ponsim_onu'),
+    accept_bulk_flow=os.environ.get('ACCEPT_BULK_FLOW', True),
+    accept_atomic_flow=os.environ.get('ACCEPT_ATOMIC_FLOW', True),
+    etcd=os.environ.get('ETCD', 'localhost:2379'),
+    core_topic=os.environ.get('CORE_TOPIC', 'rwcore'),
+    interface=os.environ.get('INTERFACE', get_my_primary_interface()),
+    instance_id=os.environ.get('INSTANCE_ID', os.environ.get('HOSTNAME', '1')),
+    kafka_adapter=os.environ.get('KAFKA_ADAPTER', '192.168.0.20:9092'),
+    kafka_cluster=os.environ.get('KAFKA_CLUSTER', '10.100.198.220:9092'),
+    backend=os.environ.get('BACKEND', 'none'),
+    retry_interval=os.environ.get('RETRY_INTERVAL', 2),
+    heartbeat_topic=os.environ.get('HEARTBEAT_TOPIC', "adapters.heartbeat"),
+)
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    _help = ('Path to ponsim_onu.yml config file (default: %s). '
+             'If relative, it is relative to main.py of ponsim adapter.'
+             % defs['config'])
+    parser.add_argument('-c', '--config',
+                        dest='config',
+                        action='store',
+                        default=defs['config'],
+                        help=_help)
+
+    _help = 'Regular expression for extracting container number from ' \
+            'container name (default: %s)' % defs['container_name_regex']
+    parser.add_argument('-X', '--container-number-extractor',
+                        dest='container_name_regex',
+                        action='store',
+                        default=defs['container_name_regex'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to consul agent (default: %s)' % defs['consul']
+    parser.add_argument('-C', '--consul',
+                        dest='consul',
+                        action='store',
+                        default=defs['consul'],
+                        help=_help)
+
+    _help = 'name of this adapter (default: %s)' % defs['name']
+    parser.add_argument('-na', '--name',
+                        dest='name',
+                        action='store',
+                        default=defs['name'],
+                        help=_help)
+
+    _help = 'vendor of this adapter (default: %s)' % defs['vendor']
+    parser.add_argument('-ven', '--vendor',
+                        dest='vendor',
+                        action='store',
+                        default=defs['vendor'],
+                        help=_help)
+
+    _help = 'supported device type of this adapter (default: %s)' % defs[
+        'device_type']
+    parser.add_argument('-dt', '--device_type',
+                        dest='device_type',
+                        action='store',
+                        default=defs['device_type'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts bulk flow updates ' \
+            '(default: %s)' % defs['accept_bulk_flow']
+    parser.add_argument('-abf', '--accept_bulk_flow',
+                        dest='accept_bulk_flow',
+                        action='store',
+                        default=defs['accept_bulk_flow'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts add/remove flow ' \
+            'updates (default: %s)' % defs['accept_atomic_flow']
+    parser.add_argument('-aaf', '--accept_atomic_flow',
+                        dest='accept_atomic_flow',
+                        action='store',
+                        default=defs['accept_atomic_flow'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to etcd server (default: %s)' % defs['etcd']
+    parser.add_argument('-e', '--etcd',
+                        dest='etcd',
+                        action='store',
+                        default=defs['etcd'],
+                        help=_help)
+
+    _help = ('unique string id of this container instance (default: %s)'
+             % defs['instance_id'])
+    parser.add_argument('-i', '--instance-id',
+                        dest='instance_id',
+                        action='store',
+                        default=defs['instance_id'],
+                        help=_help)
+
+    _help = 'ETH interface to receive on (default: %s)' % defs['interface']
+    parser.add_argument('-I', '--interface',
+                        dest='interface',
+                        action='store',
+                        default=defs['interface'],
+                        help=_help)
+
+    _help = 'omit startup banner log lines'
+    parser.add_argument('-n', '--no-banner',
+                        dest='no_banner',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = 'do not emit periodic heartbeat log messages'
+    parser.add_argument('-N', '--no-heartbeat',
+                        dest='no_heartbeat',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = "suppress debug and info logs"
+    parser.add_argument('-q', '--quiet',
+                        dest='quiet',
+                        action='count',
+                        help=_help)
+
+    _help = 'enable verbose logging'
+    parser.add_argument('-v', '--verbose',
+                        dest='verbose',
+                        action='count',
+                        help=_help)
+
+    _help = ('use docker container name as container instance id'
+             ' (overrides -i/--instance-id option)')
+    parser.add_argument('--instance-id-is-container-name',
+                        dest='instance_id_is_container_name',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka adapter broker (default: %s). '
+             'If not specified, the address from the config file is used'
+             % defs['kafka_adapter'])
+    parser.add_argument('-KA', '--kafka_adapter',
+                        dest='kafka_adapter',
+                        action='store',
+                        default=defs['kafka_adapter'],
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka cluster broker (default: %s). '
+             'If not specified, the address from the config file is used'
+             % defs['kafka_cluster'])
+    parser.add_argument('-KC', '--kafka_cluster',
+                        dest='kafka_cluster',
+                        action='store',
+                        default=defs['kafka_cluster'],
+                        help=_help)
+
+    _help = 'backend to use for config persistence'
+    parser.add_argument('-b', '--backend',
+                        default=defs['backend'],
+                        choices=['none', 'consul', 'etcd'],
+                        help=_help)
+
+    _help = 'topic of core on the kafka bus'
+    parser.add_argument('-ct', '--core_topic',
+                        dest='core_topic',
+                        action='store',
+                        default=defs['core_topic'],
+                        help=_help)
+
+    args = parser.parse_args()
+
+    # post-processing
+
+    if args.instance_id_is_container_name:
+        args.instance_id = get_my_containers_name()
+
+    return args
+
+
+def load_config(args):
+    path = args.config
+    if path.startswith('.'):
+        dir = os.path.dirname(os.path.abspath(__file__))
+        path = os.path.join(dir, path)
+    path = os.path.abspath(path)
+    with open(path) as fd:
+        config = yaml.load(fd)
+    return config
+
+
+def print_banner(log):
+    log.info(' ____                 _              ___  _   _ _   _    ')
+    log.info('|  _ \ ___  _ __  ___(_)_ __ ___    / _ \| \ | | | | |   ')
+    log.info('| |_) / _ \| \'_ \/ __| | \'_ ` _ \  | | | |  \| | | | | ')
+    log.info('|  __/ (_) | | | \__ \ | | | | | | | |_| | |\  | |_| |   ')
+    log.info('|_|   \___/|_| |_|___/_|_| |_| |_|  \___/|_| \_|\___/    ')
+    log.info('    _       _             _                            ')
+    log.info('   / \   __| | __ _ _ __ | |_ ___ _ __                 ')
+    log.info('  / _ \ / _` |/ _` | \'_ \| __/ _ \ \'__|              ')
+    log.info(' / ___ \ (_| | (_| | |_) | ||  __/ |                   ')
+    log.info('/_/   \_\__,_|\__,_| .__/ \__\___|_|                   ')
+    log.info('                   |_|                                 ')
+    log.info('(to stop: press Ctrl-C)')
+
+
+@implementer(IComponent)
+class Main(object):
+
+    def __init__(self):
+
+        self.args = args = parse_args()
+        self.config = load_config(args)
+
+        verbosity_adjust = (args.verbose or 0) - (args.quiet or 0)
+        self.log = setup_logging(self.config.get('logging', {}),
+                                 args.instance_id,
+                                 verbosity_adjust=verbosity_adjust)
+        self.log.info('container-number-extractor',
+                      regex=args.container_name_regex)
+
+        self.ponsim_onu_adapter_version = self.get_version()
+        self.log.info('Ponsim-ONU-Adapter-Version',
+                      version=self.ponsim_onu_adapter_version)
+
+        if not args.no_banner:
+            print_banner(self.log)
+
+        # Create a unique instance id using the passed-in instance id and
+        # UTC timestamp
+        current_time = arrow.utcnow().timestamp
+        self.instance_id = self.args.instance_id + '_' + str(current_time)
+
+        self.core_topic = args.core_topic
+        self.listening_topic = args.name
+        self.startup_components()
+
+        if not args.no_heartbeat:
+            self.start_heartbeat()
+            self.start_kafka_cluster_heartbeat(self.instance_id)
+
+    def get_version(self):
+        path = defs['version_file']
+        if not path.startswith('/'):
+            dir = os.path.dirname(os.path.abspath(__file__))
+            path = os.path.join(dir, path)
+
+        path = os.path.abspath(path)
+        version_file = open(path, 'r')
+        v = version_file.read()
+
+        # Use Version to validate the version string - exception will be raised
+        # if the version is invalid
+        Version(v)
+
+        version_file.close()
+        return v
+
+    def start(self):
+        self.start_reactor()  # will not return except Keyboard interrupt
+
+    def stop(self):
+        pass
+
+    def get_args(self):
+        """Allow access to command line args"""
+        return self.args
+
+    def get_config(self):
+        """Allow access to content of config file"""
+        return self.config
+
+    def _get_adapter_config(self):
+        cfg = AdapterConfig()
+        return cfg
+
+    @inlineCallbacks
+    def startup_components(self):
+        try:
+            self.log.info('starting-internal-components',
+                          consul=self.args.consul,
+                          etcd=self.args.etcd)
+
+            registry.register('main', self)
+
+            # Update the logger to output the vcore id.
+            self.log = update_logging(instance_id=self.instance_id,
+                                      vcore_id=None)
+
+            yield registry.register(
+                'kafka_cluster_proxy',
+                KafkaProxy(
+                    self.args.consul,
+                    self.args.kafka_cluster,
+                    config=self.config.get('kafka-cluster-proxy', {})
+                )
+            ).start()
+
+            config = self._get_adapter_config()
+
+            self.core_proxy = CoreProxy(
+                kafka_proxy=None,
+                core_topic=self.core_topic,
+                my_listening_topic=self.listening_topic)
+
+            ponsim_onu_adapter = PonSimOnuAdapter(
+                adapter_agent=self.core_proxy, config=config)
+            ponsim_request_handler = AdapterRequestFacade(
+                adapter=ponsim_onu_adapter)
+
+            yield registry.register(
+                'kafka_adapter_proxy',
+                IKafkaMessagingProxy(
+                    kafka_host_port=self.args.kafka_adapter,
+                    # TODO: Add KV Store object reference
+                    kv_store=self.args.backend,
+                    default_topic=self.args.name,
+                    target_cls=ponsim_request_handler
+                )
+            ).start()
+
+            self.core_proxy.kafka_proxy = get_messaging_proxy()
+
+            # retry forever
+            res = yield self._register_with_core(-1)
+
+            self.log.info('started-internal-services')
+
+        except Exception as e:
+            self.log.exception('Failure-to-start-all-components', e=e)
+
+    @inlineCallbacks
+    def shutdown_components(self):
+        """Execute before the reactor is shut down"""
+        self.log.info('exiting-on-keyboard-interrupt')
+        for component in reversed(registry.iterate()):
+            yield component.stop()
+
+        import threading
+        self.log.info('THREADS:')
+        main_thread = threading.current_thread()
+        for t in threading.enumerate():
+            if t is main_thread:
+                continue
+            if not t.isDaemon():
+                continue
+            self.log.info('joining thread {} {}'.format(
+                t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
+            t.join()
+
+    def start_reactor(self):
+        from twisted.internet import reactor
+        reactor.callWhenRunning(
+            lambda: self.log.info('twisted-reactor-started'))
+        reactor.addSystemEventTrigger('before', 'shutdown',
+                                      self.shutdown_components)
+        reactor.run()
+
+    @inlineCallbacks
+    def _register_with_core(self, retries):
+        # Send registration to Core with adapter specs
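+        # A negative 'retries' value means retry indefinitely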
+        adapter = Adapter()
+        adapter.id = self.args.name
+        adapter.vendor = self.args.name
+        adapter.version = self.ponsim_onu_adapter_version
+        while True:
+            try:
+                resp = yield self.core_proxy.register(adapter)
+                self.log.info('registration-response', response=resp)
+                returnValue(resp)
+            except TimeOutError as e:
+                self.log.warn("timeout-when-registering-with-core", e=e)
+                if retries == 0:
+                    self.log.exception("no-more-retries", e=e)
+                    raise
+                else:
+                    retries = retries if retries < 0 else retries - 1
+                    yield asleep(defs['retry_interval'])
+            except Exception as e:
+                self.log.exception("failed-registration", e=e)
+                raise
+
+    def start_heartbeat(self):
+
+        t0 = time.time()
+        t0s = time.ctime(t0)
+
+        def heartbeat():
+            self.log.debug(status='up', since=t0s, uptime=time.time() - t0)
+
+        lc = LoopingCall(heartbeat)
+        lc.start(10)
+
+    # Temporary function to send a heartbeat message to the external kafka
+    # broker
+    def start_kafka_cluster_heartbeat(self, instance_id):
+        # For the heartbeat we send a message to a dedicated heartbeat topic
+        # (defs['heartbeat_topic']).  The message is a JSON-serialized dict.
+        message = dict(
+            type='heartbeat',
+            adapter=self.args.name,
+            instance=instance_id,
+            ip=get_my_primary_local_ipv4()
+        )
+        topic = defs['heartbeat_topic']
+
+        def send_msg(start_time):
+            try:
+                kafka_cluster_proxy = get_kafka_proxy()
+                if kafka_cluster_proxy and not kafka_cluster_proxy.is_faulty():
+                    # self.log.debug('kafka-proxy-available')
+                    message['ts'] = arrow.utcnow().timestamp
+                    message['uptime'] = time.time() - start_time
+                    # self.log.debug('start-kafka-heartbeat')
+                    kafka_cluster_proxy.send_message(topic, dumps(message))
+                else:
+                    self.log.error('kafka-proxy-unavailable')
+            except Exception as e:
+                self.log.exception('failed-sending-message-heartbeat', e=e)
+
+        try:
+            t0 = time.time()
+            lc = LoopingCall(send_msg, t0)
+            lc.start(10)
+        except Exception as e:
+            self.log.exception('failed-kafka-heartbeat', e=e)
+
+
+if __name__ == '__main__':
+    Main().start()
diff --git a/adapters/ponsim_onu/ponsim_onu.py b/adapters/ponsim_onu/ponsim_onu.py
new file mode 100644
index 0000000..7e35c7f
--- /dev/null
+++ b/adapters/ponsim_onu/ponsim_onu.py
@@ -0,0 +1,373 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Fully simulated OLT/ONU adapter.
+"""
+
+import sys
+import structlog
+from twisted.internet.defer import DeferredQueue, inlineCallbacks
+from adapters.common.utils.asleep import asleep
+
+from adapters.iadapter import OnuAdapter
+from adapters.protos import third_party
+from adapters.protos.common_pb2 import OperStatus, ConnectStatus, AdminState
+from adapters.protos.device_pb2 import Port
+from adapters.protos.logical_device_pb2 import LogicalPort
+from adapters.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+    OFPPF_1GB_FD
+from adapters.protos.openflow_13_pb2 import ofp_port
+from adapters.protos.ponsim_pb2 import FlowTable
+from adapters.protos.core_adapter_pb2 import PortCapability
+
+_ = third_party
+log = structlog.get_logger()
+
+
+def mac_str_to_tuple(mac):
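+    # e.g. '00:00:00:00:00:01' -> (0, 0, 0, 0, 0, 1)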
+    return tuple(int(d, 16) for d in mac.split(':'))
+
+class PonSimOnuAdapter(OnuAdapter):
+    def __init__(self, adapter_agent, config):
+        # The DeviceType of the ONU should be the same as the vendor ID of the
+        # ONU serial number, as specified by the standard; it is required for
+        # identifying the correct adapter for a ranged ONU
+        super(PonSimOnuAdapter, self).__init__(adapter_agent=adapter_agent,
+                                               config=config,
+                                               device_handler_class=PonSimOnuHandler,
+                                               name='ponsim_onu',
+                                               vendor='Voltha project',
+                                               version='0.4',
+                                               device_type='ponsim_onu',
+                                               vendor_id='PSMO',
+                                               accepts_bulk_flow_update=True,
+                                               accepts_add_remove_flow_updates=False)
+
+
+class PonSimOnuHandler(object):
+    def __init__(self, adapter, device_id):
+        self.adapter = adapter
+        self.adapter_agent = adapter.adapter_agent
+        self.device_id = device_id
+        self.log = structlog.get_logger(device_id=device_id)
+        self.incoming_messages = DeferredQueue()
+        self.proxy_address = None
+        # reference of uni_port is required when re-enabling the device if
+        # it was disabled previously
+        self.uni_port = None
+        self.pon_port = None
+
+    def receive_message(self, msg):
+        self.incoming_messages.put(msg)
+
+
+    @inlineCallbacks
+    def activate(self, device):
+        self.log.info('activating')
+
+        # TODO:  Register for proxy address
+        # # first we verify that we got parent reference and proxy info
+        # assert device.parent_id
+        # assert device.proxy_address.device_id
+        # assert device.proxy_address.channel_id
+        #
+        # # register for proxied messages right away
+        # self.proxy_address = device.proxy_address
+        # self.adapter_agent.register_for_proxied_messages(device.proxy_address)
+
+        # populate device info
+        device.root = False
+        device.vendor = 'ponsim'
+        device.model = 'n/a'
+        device.connect_status = ConnectStatus.REACHABLE
+        yield self.adapter_agent.device_update(device)
+
+        # register physical ports
+        self.uni_port = Port(
+            port_no=2,
+            label='UNI facing Ethernet port',
+            type=Port.ETHERNET_UNI,
+            admin_state=AdminState.ENABLED,
+            oper_status=OperStatus.ACTIVE
+        )
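+        # The PON port carries a peer reference to the parent OLT device and
+        # the parent port this ONU is attached to.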
+        self.pon_port = Port(
+            port_no=1,
+            label='PON port',
+            type=Port.PON_ONU,
+            admin_state=AdminState.ENABLED,
+            oper_status=OperStatus.ACTIVE,
+            peers=[
+                Port.PeerPort(
+                    device_id=device.parent_id,
+                    port_no=device.parent_port_no
+                )
+            ]
+        )
+        self.adapter_agent.port_created(device.id, self.uni_port)
+        self.adapter_agent.port_created(device.id, self.pon_port)
+
+        yield self.adapter_agent.device_state_update(device.id, oper_status=OperStatus.ACTIVE)
+
+
+    def get_ofp_port_info(self, device, port_no):
+        # Since the adapter created the device port, it already has a reference
+        # to the port needed to return the capability.  TODO: Do a lookup on
+        # the UNI port number and return the appropriate attributes
+        self.log.info('get_ofp_port_info', port_no=port_no, device_id=device.id)
+        # port_no = device.proxy_address.channel_id
+        cap = OFPPF_1GB_FD | OFPPF_FIBER
+        return PortCapability(
+            port=LogicalPort(
+                id='uni-{}'.format(port_no),
+                ofp_port=ofp_port(
+                    port_no=port_no,
+                    hw_addr=mac_str_to_tuple('00:00:00:00:00:%02x' % port_no),
+                    name='uni-{}'.format(port_no),
+                    config=0,
+                    state=OFPPS_LIVE,
+                    curr=cap,
+                    advertised=cap,
+                    peer=cap,
+                    curr_speed=OFPPF_1GB_FD,
+                    max_speed=OFPPF_1GB_FD
+                )
+            )
+        )
+
+    def _get_uni_port(self):
+        ports = self.adapter_agent.get_ports(self.device_id, Port.ETHERNET_UNI)
+        if ports:
+            # For now, we use only one UNI port
+            return ports[0]
+
+    def _get_pon_port(self):
+        ports = self.adapter_agent.get_ports(self.device_id, Port.PON_ONU)
+        if ports:
+            # For now, we use only one PON port
+            return ports[0]
+
+    def reconcile(self, device):
+        self.log.info('reconciling-ONU-device-starts')
+
+        # first we verify that we got parent reference and proxy info
+        assert device.parent_id
+        assert device.proxy_address.device_id
+        assert device.proxy_address.channel_id
+
+        # register for proxied messages right away
+        self.proxy_address = device.proxy_address
+        self.adapter_agent.register_for_proxied_messages(device.proxy_address)
+
+        # Set the connection status to REACHABLE
+        device.connect_status = ConnectStatus.REACHABLE
+        self.adapter_agent.update_device(device)
+
+        # TODO: Verify that the uni, pon and logical ports exists
+
+        # Mark the device as REACHABLE and ACTIVE
+        device = self.adapter_agent.get_device(device.id)
+        device.connect_status = ConnectStatus.REACHABLE
+        device.oper_status = OperStatus.ACTIVE
+        self.adapter_agent.update_device(device)
+
+        self.log.info('reconciling-ONU-device-ends')
+
+    @inlineCallbacks
+    def update_flow_table(self, flows):
+
+        # we need to proxy through the OLT to get to the ONU
+
+        # reset response queue
+        while self.incoming_messages.pending:
+            yield self.incoming_messages.get()
+
+        msg = FlowTable(
+            port=self.proxy_address.channel_id,
+            flows=flows
+        )
+        self.adapter_agent.send_proxied_message(self.proxy_address, msg)
+
+        yield self.incoming_messages.get()
+
+    def remove_from_flow_table(self, flows):
+        self.log.debug('remove-from-flow-table', flows=flows)
+        # TODO: Update PONSIM code to accept incremental flow changes.
+        # Once completed, the accepts_add_remove_flow_updates for this
+        # device type can be set to True
+
+    def add_to_flow_table(self, flows):
+        self.log.debug('add-to-flow-table', flows=flows)
+        # TODO: Update PONSIM code to accept incremental flow changes
+        # Once completed, the accepts_add_remove_flow_updates for this
+        # device type can be set to True
+
+    @inlineCallbacks
+    def reboot(self):
+        self.log.info('rebooting', device_id=self.device_id)
+
+        # Update the operational status to ACTIVATING and connect status to
+        # UNREACHABLE
+        device = self.adapter_agent.get_device(self.device_id)
+        previous_oper_status = device.oper_status
+        previous_conn_status = device.connect_status
+        device.oper_status = OperStatus.ACTIVATING
+        device.connect_status = ConnectStatus.UNREACHABLE
+        self.adapter_agent.update_device(device)
+
+        # Sleep 10 secs, simulating a reboot
+        # TODO: send alert and clear alert after the reboot
+        yield asleep(10)
+
+        # Change the operational status back to its previous state.  With a
+        # real OLT the operational state should be the state the device is
+        # after a reboot.
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+        device.oper_status = previous_oper_status
+        device.connect_status = previous_conn_status
+        self.adapter_agent.update_device(device)
+        self.log.info('rebooted', device_id=self.device_id)
+
+    def self_test_device(self, device):
+        """
+        This is called to self-test a device based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: The result of the self test
+        """
+        log.info('self-test-device', device=device.id)
+        raise NotImplementedError()
+
+    def disable(self):
+        self.log.info('disabling', device_id=self.device_id)
+
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+
+        # Disable all ports on that device
+        self.adapter_agent.disable_all_ports(self.device_id)
+
+        # Update the device operational status to UNKNOWN
+        device.oper_status = OperStatus.UNKNOWN
+        device.connect_status = ConnectStatus.UNREACHABLE
+        self.adapter_agent.update_device(device)
+
+        # Remove the uni logical port from the OLT, if still present
+        parent_device = self.adapter_agent.get_device(device.parent_id)
+        assert parent_device
+        logical_device_id = parent_device.parent_id
+        assert logical_device_id
+        port_no = device.proxy_address.channel_id
+        port_id = 'uni-{}'.format(port_no)
+        try:
+            port = self.adapter_agent.get_logical_port(logical_device_id,
+                                                       port_id)
+            self.adapter_agent.delete_logical_port(logical_device_id, port)
+        except KeyError:
+            self.log.info('logical-port-not-found', device_id=self.device_id,
+                          portid=port_id)
+
+        # Remove pon port from parent
+        self.pon_port = self._get_pon_port()
+        self.adapter_agent.delete_port_reference_from_parent(self.device_id,
+                                                             self.pon_port)
+
+        # Just updating the port status may be an option as well
+        # port.ofp_port.config = OFPPC_NO_RECV
+        # yield self.adapter_agent.update_logical_port(logical_device_id,
+        #                                             port)
+        # Unregister for proxied message
+        self.adapter_agent.unregister_for_proxied_messages(
+            device.proxy_address)
+
+        # TODO:
+        # 1) Remove all flows from the device
+        # 2) Remove the device from ponsim
+
+        self.log.info('disabled', device_id=device.id)
+
+    def reenable(self):
+        self.log.info('re-enabling', device_id=self.device_id)
+        try:
+            # Get the latest device reference
+            device = self.adapter_agent.get_device(self.device_id)
+
+            # First we verify that we got parent reference and proxy info
+            assert device.parent_id
+            assert device.proxy_address.device_id
+            assert device.proxy_address.channel_id
+
+            # Re-register for proxied messages right away
+            self.proxy_address = device.proxy_address
+            self.adapter_agent.register_for_proxied_messages(
+                device.proxy_address)
+
+            # Re-enable the ports on that device
+            self.adapter_agent.enable_all_ports(self.device_id)
+
+            # Refresh the port reference
+            self.uni_port = self._get_uni_port()
+            self.pon_port = self._get_pon_port()
+
+            # Add the pon port reference to the parent
+            self.adapter_agent.add_port_reference_to_parent(device.id,
+                                                            self.pon_port)
+
+            # Update the connect status to REACHABLE
+            device.connect_status = ConnectStatus.REACHABLE
+            self.adapter_agent.update_device(device)
+
+            # re-add uni port to logical device
+            parent_device = self.adapter_agent.get_device(device.parent_id)
+            logical_device_id = parent_device.parent_id
+            assert logical_device_id
+            port_no = device.proxy_address.channel_id
+            cap = OFPPF_1GB_FD | OFPPF_FIBER
+            self.adapter_agent.add_logical_port(logical_device_id, LogicalPort(
+                id='uni-{}'.format(port_no),
+                ofp_port=ofp_port(
+                    port_no=port_no,
+                    hw_addr=mac_str_to_tuple('00:00:00:00:00:%02x' % port_no),
+                    name='uni-{}'.format(port_no),
+                    config=0,
+                    state=OFPPS_LIVE,
+                    curr=cap,
+                    advertised=cap,
+                    peer=cap,
+                    curr_speed=OFPPF_1GB_FD,
+                    max_speed=OFPPF_1GB_FD
+                ),
+                device_id=device.id,
+                device_port_no=self.uni_port.port_no
+            ))
+
+            device = self.adapter_agent.get_device(device.id)
+            device.oper_status = OperStatus.ACTIVE
+            self.adapter_agent.update_device(device)
+
+            self.log.info('re-enabled', device_id=device.id)
+        except Exception as e:
+            self.log.exception('error-reenabling', e=e)
+
+    def delete(self):
+        self.log.info('deleting', device_id=self.device_id)
+
+        # A delete request may be received when an OLT is disabled
+
+        # TODO:
+        # 1) Remove all flows from the device
+        # 2) Remove the device from ponsim
+
+        self.log.info('deleted', device_id=self.device_id)
diff --git a/adapters/ponsim_onu/ponsim_onu.yml b/adapters/ponsim_onu/ponsim_onu.yml
new file mode 100644
index 0000000..1afafdf
--- /dev/null
+++ b/adapters/ponsim_onu/ponsim_onu.yml
@@ -0,0 +1,52 @@
+logging:
+    version: 1
+
+    formatters:
+      brief:
+        format: '%(message)s'
+      default:
+        format: '%(asctime)s.%(msecs)03d %(levelname)-8s %(threadName)s %(module)s.%(funcName)s %(message)s'
+        datefmt: '%Y%m%dT%H%M%S'
+
+    handlers:
+        console:
+            class : logging.StreamHandler
+            level: DEBUG
+            formatter: default
+            stream: ext://sys.stdout
+        localRotatingFile:
+            class: logging.handlers.RotatingFileHandler
+            filename: ponsim_onu.log
+            formatter: default
+            maxBytes: 2097152
+            backupCount: 10
+            level: DEBUG
+        null:
+            class: logging.NullHandler
+
+    loggers:
+        amqp:
+            handlers: [null]
+            propagate: False
+        conf:
+            propagate: False
+        '': # root logger
+            handlers: [console, localRotatingFile]
+            level: DEBUG # this can be bumped up/down by -q and -v command line
+                        # options
+            propagate: False
+
+
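+# Map internal event bus topics to Kafka topics on the cluster message bus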
+kafka-cluster-proxy:
+    event_bus_publisher:
+        topic_mappings:
+            'model-change-events':
+                kafka_topic: 'voltha.events'
+                filters:     [null]
+            'alarms':
+                kafka_topic: 'voltha.alarms'
+                filters:     [null]
+            'kpis':
+                kafka_topic: 'voltha.kpis'
+                filters:     [null]
+
diff --git a/adapters/protos/Makefile b/adapters/protos/Makefile
new file mode 100644
index 0000000..0fad970
--- /dev/null
+++ b/adapters/protos/Makefile
@@ -0,0 +1,101 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Makefile to build all protobuf and gRPC related artifacts
+
+default: third_party build
+
+PROTO_FILES := $(wildcard ../../protos/*.proto)
+PROTO_GOOGLE_API := $(wildcard third_party/google/api/*.proto)
+PROTO_ALL_FILES := $(PROTO_FILES) $(PROTO_GOOGLE_API)
+PROTO_PB2_FILES := $(foreach f,$(PROTO_FILES),$(subst .proto,_pb2.py,$(f)))
+PROTO_PB2_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,_pb2.py,$(f)))
+PROTO_PB2_GRPC_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,_pb2_grpc.py,$(f)))
+PROTO_DESC_GOOGLE_API := $(foreach f,$(PROTO_GOOGLE_API),$(subst .proto,.desc,$(f)))
+
+PROTOC_PREFIX := /usr/local
+PROTOC_LIBDIR := $(PROTOC_PREFIX)/lib
+
+PROTOC := $(PROTOC_PREFIX)/bin/protoc
+
+PROTOC_VERSION := "3.3.0"
+PROTOC_DOWNLOAD_PREFIX := "https://github.com/google/protobuf/releases/download"
+PROTOC_DIR := protobuf-$(PROTOC_VERSION)
+PROTOC_TARBALL := protobuf-python-$(PROTOC_VERSION).tar.gz
+PROTOC_DOWNLOAD_URI := $(PROTOC_DOWNLOAD_PREFIX)/v$(PROTOC_VERSION)/$(PROTOC_TARBALL)
+PROTOC_BUILD_TMP_DIR := "/tmp/protobuf-build-$(shell uname -s | tr '[:upper:]' '[:lower:]')"
+
+# Google API needs to be built from within the third party directory
+#
+third_party: google_api
+google_api:
+	@echo "Building protocol buffer artifacts from third_party google api"
+	cd third_party ; \
+	env LD_LIBRARY_PATH=$(PROTOC_LIBDIR) python -m grpc.tools.protoc \
+	    -I. \
+	    --python_out=. \
+	    --grpc_python_out=. \
+	    --descriptor_set_out=google/api/annotations.desc \
+	    --include_imports \
+	    --include_source_info \
+        google/api/annotations.proto google/api/http.proto
+
+build: $(PROTOC) $(PROTO_PB2_FILES)
+
+%_pb2.py: %.proto Makefile
+	@echo "Building protocol buffer artifacts from $<"
+	env LD_LIBRARY_PATH=$(PROTOC_LIBDIR) python -m grpc.tools.protoc \
+	    -I../../protos \
+	    -I./third_party \
+	    --python_out=. \
+	    --grpc_python_out=. \
+	    --descriptor_set_out=./$(basename $(notdir $<)).desc \
+	    --include_imports \
+	    --include_source_info \
+	    $<
+
+clean:
+	rm -f *.desc *_pb2* \
+		$(PROTO_PB2_GOOGLE_API) \
+		$(PROTO_PB2_GRPC_GOOGLE_API)\
+		$(PROTO_DESC_GOOGLE_API)
+
+$(PROTOC):
+	@echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+	@echo "It looks like you don't have protocol buffer tools installed."
+	@echo "To install the protocol buffer toolchain, you can run:"
+	@echo "    make install-protoc"
+	@echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+
+install-protoc: $(PROTOC)
+	@echo "Downloading and installing protocol buffer support."
+	@echo "Installation will require sudo privileges"
+	@echo "This will take a few minutes."
+	mkdir -p $(PROTOC_BUILD_TMP_DIR)
+	@echo "We ask for sudo credentials now so we can install at the end"; \
+	sudo echo "Thanks"; \
+	    cd $(PROTOC_BUILD_TMP_DIR); \
+	    wget $(PROTOC_DOWNLOAD_URI); \
+	    tar xzvf $(PROTOC_TARBALL); \
+	    cd $(PROTOC_DIR); \
+	    ./configure --prefix=$(PROTOC_PREFIX); \
+	    make; \
+	    sudo make install
+
+uninstall-protoc:
+	cd $(PROTOC_BUILD_TMP_DIR)/$(PROTOC_DIR); \
+	    sudo make uninstall
+
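A note on usage: running the default target (make third_party build) produces the
*_pb2.py / *_pb2_grpc.py modules and .desc descriptor sets next to the .proto sources.
The sketch below drives the same grpc.tools.protoc module that the %_pb2.py rule
invokes, from Python instead of make; the proto path at the bottom is a placeholder.

from grpc.tools import protoc

def compile_proto(proto_file):
    # Mirrors the Makefile rule:
    #   python -m grpc.tools.protoc -I../../protos -I./third_party \
    #       --python_out=. --grpc_python_out=. <file>
    args = [
        'protoc',
        '-I../../protos',
        '-I./third_party',
        '--python_out=.',
        '--grpc_python_out=.',
        proto_file,
    ]
    return protoc.main(args)  # returns 0 on success

if __name__ == '__main__':
    compile_proto('../../protos/example.proto')  # placeholder path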
diff --git a/adapters/protos/__init__.py b/adapters/protos/__init__.py
new file mode 100644
index 0000000..cfcdc97
--- /dev/null
+++ b/adapters/protos/__init__.py
@@ -0,0 +1,15 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
\ No newline at end of file
diff --git a/adapters/protos/third_party/__init__.py b/adapters/protos/third_party/__init__.py
new file mode 100644
index 0000000..2740afe
--- /dev/null
+++ b/adapters/protos/third_party/__init__.py
@@ -0,0 +1,53 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+This helps load http_pb2 and annotations_pb2.
+Without this, the Python importer will not be able to process the lines:
+from google.api import http_pb2 or
+from google.api import annotations_pb2
+(Without importing these, the protobuf loader will not recognize http options
+in the protobuf definitions.)
+"""
+
+from importlib import import_module
+import os
+import sys
+
+
+class GoogleApiImporter(object):
+
+    def find_module(self, full_name, path=None):
+        if full_name == 'google.api':
+            self.path = [os.path.dirname(__file__)]
+            return self
+
+    def load_module(self, name):
+        if name in sys.modules:
+            return sys.modules[name]
+        full_name = 'adapters.protos.third_party.' + name
+        import_module(full_name)
+        module = sys.modules[full_name]
+        sys.modules[name] = module
+        return module
+
+
+sys.meta_path.append(GoogleApiImporter())
+try:
+    from google.api import http_pb2, annotations_pb2
+    _ = http_pb2, annotations_pb2
+except AssertionError:
+    pass
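The meta-path importer above lets the plain "google.api" package name resolve to the
locally generated stubs under adapters/protos/third_party. A minimal sketch of the
effect, assuming the stubs have already been built by the protos Makefile and the
repository root is on PYTHONPATH:

import adapters.protos.third_party  # installs GoogleApiImporter on sys.meta_path

# These imports now succeed even though no real "google.api" package is installed;
# they also register the (google.api.http) option extension with protobuf, so
# descriptors that carry HTTP annotations can be loaded.
from google.api import annotations_pb2, http_pb2

print(http_pb2.HttpRule.DESCRIPTOR.full_name)  # google.api.HttpRule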
diff --git a/adapters/protos/third_party/google/LICENSE b/adapters/protos/third_party/google/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/adapters/protos/third_party/google/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/adapters/protos/third_party/google/__init__.py b/adapters/protos/third_party/google/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/protos/third_party/google/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/protos/third_party/google/api/__init__.py b/adapters/protos/third_party/google/api/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/protos/third_party/google/api/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/protos/third_party/google/api/annotations.proto b/adapters/protos/third_party/google/api/annotations.proto
new file mode 100644
index 0000000..cbd18b8
--- /dev/null
+++ b/adapters/protos/third_party/google/api/annotations.proto
@@ -0,0 +1,29 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/api/http.proto";
+import "google/protobuf/descriptor.proto";
+
+option java_multiple_files = true;
+option java_outer_classname = "AnnotationsProto";
+option java_package = "com.google.api";
+
+extend google.protobuf.MethodOptions {
+  // See `HttpRule`.
+  HttpRule http = 72295728;
+}
diff --git a/adapters/protos/third_party/google/api/http.proto b/adapters/protos/third_party/google/api/http.proto
new file mode 100644
index 0000000..ce07aa1
--- /dev/null
+++ b/adapters/protos/third_party/google/api/http.proto
@@ -0,0 +1,127 @@
+// Copyright (c) 2015, Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+option java_multiple_files = true;
+option java_outer_classname = "HttpProto";
+option java_package = "com.google.api";
+
+
+// `HttpRule` defines the mapping of an RPC method to one or more HTTP REST API
+// methods. The mapping determines what portions of the request message are
+// populated from the path, query parameters, or body of the HTTP request.  The
+// mapping is typically specified as an `google.api.http` annotation, see
+// "google/api/annotations.proto" for details.
+//
+// The mapping consists of a mandatory field specifying a path template and an
+// optional `body` field specifying what data is represented in the HTTP request
+// body. The field name for the path indicates the HTTP method. Example:
+//
+// ```
+// package google.storage.v2;
+//
+// import "google/api/annotations.proto";
+//
+// service Storage {
+//   rpc CreateObject(CreateObjectRequest) returns (Object) {
+//     option (google.api.http) {
+//       post: "/v2/{bucket_name=buckets/*}/objects"
+//       body: "object"
+//     };
+//   };
+// }
+// ```
+//
+// Here `bucket_name` and `object` bind to fields of the request message
+// `CreateObjectRequest`.
+//
+// The rules for mapping HTTP path, query parameters, and body fields
+// to the request message are as follows:
+//
+// 1. The `body` field specifies either `*` or a field path, or is
+//    omitted. If omitted, it assumes there is no HTTP body.
+// 2. Leaf fields (recursive expansion of nested messages in the
+//    request) can be classified into three types:
+//     (a) Matched in the URL template.
+//     (b) Covered by body (if body is `*`, everything except (a) fields;
+//         else everything under the body field)
+//     (c) All other fields.
+// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
+// 4. Any body sent with an HTTP request can contain only (b) fields.
+//
+// The syntax of the path template is as follows:
+//
+//     Template = "/" Segments [ Verb ] ;
+//     Segments = Segment { "/" Segment } ;
+//     Segment  = "*" | "**" | LITERAL | Variable ;
+//     Variable = "{" FieldPath [ "=" Segments ] "}" ;
+//     FieldPath = IDENT { "." IDENT } ;
+//     Verb     = ":" LITERAL ;
+//
+// `*` matches a single path component, `**` zero or more path components, and
+// `LITERAL` a constant.  A `Variable` can match an entire path as specified
+// again by a template; this nested template must not contain further variables.
+// If no template is given with a variable, it matches a single path component.
+// The notation `{var}` is henceforth equivalent to `{var=*}`.
+//
+// Use CustomHttpPattern to specify any HTTP method that is not included in the
+// pattern field, such as HEAD, or "*" to leave the HTTP method unspecified for
+// a given URL path rule. The wild-card rule is useful for services that provide
+// content to Web (HTML) clients.
+message HttpRule {
+
+  // Determines the URL pattern matched by this rule. This pattern can be
+  // used with any of the {get|put|post|delete|patch} methods. A custom method
+  // can be defined using the 'custom' field.
+  oneof pattern {
+    // Used for listing and getting information about resources.
+    string get = 2;
+
+    // Used for updating a resource.
+    string put = 3;
+
+    // Used for creating a resource.
+    string post = 4;
+
+    // Used for deleting a resource.
+    string delete = 5;
+
+    // Used for updating a resource.
+    string patch = 6;
+
+    // Custom pattern is used for defining custom verbs.
+    CustomHttpPattern custom = 8;
+  }
+
+  // The name of the request field whose value is mapped to the HTTP body, or
+  // `*` for mapping all fields not captured by the path pattern to the HTTP
+  // body.
+  string body = 7;
+
+  // Additional HTTP bindings for the selector. Nested bindings must not
+  // specify a selector and must not contain additional bindings.
+  repeated HttpRule additional_bindings = 11;
+}
+
+// A custom pattern is used for defining a custom HTTP verb.
+message CustomHttpPattern {
+  // The name of this custom HTTP verb.
+  string kind = 1;
+
+  // The path matched by this custom verb.
+  string path = 2;
+}
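The path-template grammar documented above is what binds URL segments to request
fields ("*" matches one segment, "**" the rest of the path, "{var=...}" captures a
field). The toy matcher below only illustrates that binding; it is not how a real
gRPC-to-HTTP gateway implements it.

import re

def template_to_regex(template):
    # Convert an HttpRule path template such as
    # "/v2/{bucket_name=buckets/*}/objects" into a regex with named groups.
    parts = []
    for piece in re.split(r'(\{[^}]+\})', template):
        if piece.startswith('{'):
            name, _, seg = piece[1:-1].partition('=')
            seg = seg or '*'
            seg_re = seg.replace('**', '.+').replace('*', '[^/]+')
            parts.append('(?P<%s>%s)' % (name, seg_re))
        else:
            parts.append(re.escape(piece))
    return re.compile('^' + ''.join(parts) + '$')

if __name__ == '__main__':
    rx = template_to_regex('/v2/{bucket_name=buckets/*}/objects')
    print(rx.match('/v2/buckets/photos/objects').groupdict())
    # -> {'bucket_name': 'buckets/photos'}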
diff --git a/adapters/requirements.txt b/adapters/requirements.txt
new file mode 100755
index 0000000..a0641b2
--- /dev/null
+++ b/adapters/requirements.txt
@@ -0,0 +1,68 @@
+argparse==1.2.1
+arrow==0.10.0
+bitstring==3.1.5
+cmd2==0.7.0
+colorama==0.3.9
+cython==0.24.1
+decorator==4.1.2
+docker-py==1.10.6
+fluent-logger==0.6.0
+grpc==0.3.post19
+grpcio==1.3.5
+grpcio-tools==1.3.5
+hash_ring==1.3.1
+hexdump==3.3
+jinja2==2.8
+jsonpatch==1.16
+kafka_python==1.3.5
+klein==17.10.0
+kubernetes==5.0.0
+netaddr==0.7.19
+networkx==2.0
+nose==1.3.7
+nose-exclude==0.5.0
+nose-testconfig==0.10
+mock==2.0.0
+netifaces==0.10.6
+pcapy==0.11.1
+pep8==1.7.1
+pep8-naming>=0.3.3
+protobuf==3.3.0
+protobuf-to-dict==0.1.0
+pyflakes==1.6.0
+pylint==1.7.6
+#pypcap>=1.1.5
+pyOpenSSL==17.3.0
+PyYAML==3.12
+requests==2.18.4
+scapy==2.3.3
+service-identity==17.0.0
+simplejson==3.12.0
+jsonschema==2.6.0
+six==1.11.0
+structlog==17.2.0
+termcolor==1.1.0
+transitions==0.6.4
+treq==17.8.0
+Twisted==17.9.0
+txaioetcd==0.3.0
+urllib3==1.22
+pyang==1.7.3
+lxml==3.6.4
+nosexcover==1.0.11
+zmq==0.0.0
+pyzmq==16.0.3
+txZMQ==0.8.0
+ncclient==0.5.3
+xmltodict==0.11.0
+dicttoxml==1.7.4
+etcd3==0.7.0
+pyparsing==2.2.0
+packaging==17.1
+
+# python-consul>=0.6.1: we need the pre-release version for now because 0.6.1 does
+# not yet support Twisted; once released, it will be version 0.6.2.
+git+https://github.com/cablehead/python-consul.git
+
+# Twisted Python kafka client
+git+https://github.com/ciena/afkak.git