This commit consists of the following:
1) The Kafka messaging proxy, written in Twisted Python, for adapters
2) Initial implementation and containerization of the ponsim OLT adapter
and the ponsim ONU adapter
3) Initial submission of the request and response facades in both Twisted
Python and Go
4) Initial implementation of device management and logical device management
in the Core
5) Update to the log module to allow dynamically setting the log level per
package via the gRPC API
6) Bug fixes and minor changes
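
For reference, the adapter-side request path wires an AdapterRequestFacade
into the Kafka messaging proxy roughly as follows (a minimal sketch
distilled from adapters/ponsim_olt/main.py in this commit; the broker
address and topic shown are illustrative):

    adapter = PonSimOltAdapter(adapter_agent=core_proxy, config=config)
    request_handler = AdapterRequestFacade(adapter=adapter)
    yield registry.register(
        'kafka_adapter_proxy',
        IKafkaMessagingProxy(
            kafka_host_port='localhost:9092',  # illustrative broker address
            kv_store='none',
            default_topic='ponsim_olt',        # topic this adapter listens on
            target_cls=request_handler         # Core requests dispatched here
        )
    ).start()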

Change-Id: Ia8f033da84cfd08275335bae9542802415e7bb0f
diff --git a/adapters/ponsim_olt/VERSION b/adapters/ponsim_olt/VERSION
new file mode 100644
index 0000000..c0ab82c
--- /dev/null
+++ b/adapters/ponsim_olt/VERSION
@@ -0,0 +1 @@
+0.0.1-dev
diff --git a/adapters/ponsim_olt/__init__.py b/adapters/ponsim_olt/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/adapters/ponsim_olt/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/adapters/ponsim_olt/main.py b/adapters/ponsim_olt/main.py
new file mode 100755
index 0000000..53745ee
--- /dev/null
+++ b/adapters/ponsim_olt/main.py
@@ -0,0 +1,487 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Ponsim OLT Adapter main entry point"""
+
+import argparse
+import arrow
+import os
+import time
+
+import yaml
+from simplejson import dumps
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.task import LoopingCall
+from zope.interface import implementer
+from adapters.protos import third_party
+from adapters.common.structlog_setup import setup_logging, update_logging
+from adapters.common.utils.dockerhelpers import get_my_containers_name
+from adapters.common.utils.nethelpers import get_my_primary_local_ipv4, \
+    get_my_primary_interface
+from adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from adapters.common.utils.registry import registry, IComponent
+from packaging.version import Version
+from adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, get_messaging_proxy
+from adapters.ponsim_olt.ponsim_olt import PonSimOltAdapter
+from adapters.protos.adapter_pb2 import AdapterConfig, Adapter
+from adapters.kafka.adapter_request_facade import AdapterRequestFacade
+from adapters.kafka.core_proxy import CoreProxy
+from adapters.common.utils.deferred_utils import TimeOutError
+from adapters.common.utils.asleep import asleep
+
+_ = third_party
+
+defs = dict(
+    version_file='./VERSION',
+    config=os.environ.get('CONFIG', './ponsim_olt.yml'),
+    container_name_regex=os.environ.get('CONTAINER_NUMBER_EXTRACTOR',
+                                        r'^.*\.([0-9]+)\..*$'),
+    consul=os.environ.get('CONSUL', 'localhost:8500'),
+    name=os.environ.get('NAME', 'ponsim_olt'),
+    vendor=os.environ.get('VENDOR', 'Voltha Project'),
+    device_type=os.environ.get('DEVICE_TYPE', 'ponsim_olt'),
+    accept_bulk_flow=os.environ.get('ACCEPT_BULK_FLOW', True),
+    accept_atomic_flow=os.environ.get('ACCEPT_ATOMIC_FLOW', True),
+    etcd=os.environ.get('ETCD', 'localhost:2379'),
+    core_topic=os.environ.get('CORE_TOPIC', 'rwcore'),
+    interface=os.environ.get('INTERFACE', get_my_primary_interface()),
+    instance_id=os.environ.get('INSTANCE_ID', os.environ.get('HOSTNAME', '1')),
+    kafka_adapter=os.environ.get('KAFKA_ADAPTER', '192.168.0.20:9092'),
+    kafka_cluster=os.environ.get('KAFKA_CLUSTER', '10.100.198.220:9092'),
+    backend=os.environ.get('BACKEND', 'none'),
+    retry_interval=os.environ.get('RETRY_INTERVAL', 2),
+    heartbeat_topic=os.environ.get('HEARTBEAT_TOPIC', "adapters.heartbeat"),
+)
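+# Note: every default above can be overridden through the corresponding
+# environment variable, and most also through command-line options (see
+# parse_args below).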
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    _help = ('Path to ponsim_olt.yml config file (default: %s). '
+             'If relative, it is relative to main.py of the ponsim OLT adapter.'
+             % defs['config'])
+    parser.add_argument('-c', '--config',
+                        dest='config',
+                        action='store',
+                        default=defs['config'],
+                        help=_help)
+
+    _help = 'Regular expression for extracting container number from ' \
+            'container name (default: %s)' % defs['container_name_regex']
+    parser.add_argument('-X', '--container-number-extractor',
+                        dest='container_name_regex',
+                        action='store',
+                        default=defs['container_name_regex'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to consul agent (default: %s)' % defs['consul']
+    parser.add_argument('-C', '--consul',
+                        dest='consul',
+                        action='store',
+                        default=defs['consul'],
+                        help=_help)
+
+    _help = 'name of this adapter (default: %s)' % defs['name']
+    parser.add_argument('-na', '--name',
+                        dest='name',
+                        action='store',
+                        default=defs['name'],
+                        help=_help)
+
+    _help = 'vendor of this adapter (default: %s)' % defs['vendor']
+    parser.add_argument('-ven', '--vendor',
+                        dest='vendor',
+                        action='store',
+                        default=defs['vendor'],
+                        help=_help)
+
+    _help = 'supported device type of this adapter (default: %s)' % defs[
+        'device_type']
+    parser.add_argument('-dt', '--device_type',
+                        dest='device_type',
+                        action='store',
+                        default=defs['device_type'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts bulk flow updates ' \
+            'adapter (default: %s)' % defs['accept_bulk_flow']
+    parser.add_argument('-abf', '--accept_bulk_flow',
+                        dest='accept_bulk_flow',
+                        action='store',
+                        default=defs['accept_bulk_flow'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts add/remove flow ' \
+            '(default: %s)' % defs['accept_atomic_flow']
+    parser.add_argument('-aaf', '--accept_atomic_flow',
+                        dest='accept_atomic_flow',
+                        action='store',
+                        default=defs['accept_atomic_flow'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to etcd server (default: %s)' % defs['etcd']
+    parser.add_argument('-e', '--etcd',
+                        dest='etcd',
+                        action='store',
+                        default=defs['etcd'],
+                        help=_help)
+
+    _help = ('unique string id of this container instance (default: %s)'
+             % defs['instance_id'])
+    parser.add_argument('-i', '--instance-id',
+                        dest='instance_id',
+                        action='store',
+                        default=defs['instance_id'],
+                        help=_help)
+
+    _help = 'ETH interface to receive on (default: %s)' % defs['interface']
+    parser.add_argument('-I', '--interface',
+                        dest='interface',
+                        action='store',
+                        default=defs['interface'],
+                        help=_help)
+
+    _help = 'omit startup banner log lines'
+    parser.add_argument('-n', '--no-banner',
+                        dest='no_banner',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = 'do not emit periodic heartbeat log messages'
+    parser.add_argument('-N', '--no-heartbeat',
+                        dest='no_heartbeat',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = "suppress debug and info logs"
+    parser.add_argument('-q', '--quiet',
+                        dest='quiet',
+                        action='count',
+                        help=_help)
+
+    _help = 'enable verbose logging'
+    parser.add_argument('-v', '--verbose',
+                        dest='verbose',
+                        action='count',
+                        help=_help)
+
+    _help = ('use docker container name as container instance id'
+             ' (overrides -i/--instance-id option)')
+    parser.add_argument('--instance-id-is-container-name',
+                        dest='instance_id_is_container_name',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka adapter broker (default: %s). '
+             'If not specified, the address from the config file is used.'
+             % defs['kafka_adapter'])
+    parser.add_argument('-KA', '--kafka_adapter',
+                        dest='kafka_adapter',
+                        action='store',
+                        default=defs['kafka_adapter'],
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka cluster broker (default: %s). '
+             'If not specified, the address from the config file is used.'
+             % defs['kafka_cluster'])
+    parser.add_argument('-KC', '--kafka_cluster',
+                        dest='kafka_cluster',
+                        action='store',
+                        default=defs['kafka_cluster'],
+                        help=_help)
+
+    _help = 'backend to use for config persistence'
+    parser.add_argument('-b', '--backend',
+                        default=defs['backend'],
+                        choices=['none', 'consul', 'etcd'],
+                        help=_help)
+
+    _help = 'topic of core on the kafka bus'
+    parser.add_argument('-ct', '--core_topic',
+                        dest='core_topic',
+                        action='store',
+                        default=defs['core_topic'],
+                        help=_help)
+
+    args = parser.parse_args()
+
+    # post-processing
+
+    if args.instance_id_is_container_name:
+        args.instance_id = get_my_containers_name()
+
+    return args
+
+
+def load_config(args):
+    path = args.config
+    if path.startswith('.'):
+        dir = os.path.dirname(os.path.abspath(__file__))
+        path = os.path.join(dir, path)
+    path = os.path.abspath(path)
+    with open(path) as fd:
+        config = yaml.safe_load(fd)
+    return config
+
+
+def print_banner(log):
+    log.info(' ____                 _              ___  _   _____   ')
+    log.info('|  _ \ ___  _ __  ___(_)_ __ ___    / _ \| | |_   _|  ')
+    log.info('| |_) / _ \| \'_ \/ __| | \'_ ` _ \  | | | | |   | |  ')
+    log.info('|  __/ (_) | | | \__ \ | | | | | | | |_| | |___| |    ')
+    log.info('|_|   \___/|_| |_|___/_|_| |_| |_|  \___/|_____|_|    ')
+    log.info('                                                      ')
+    log.info('   _       _             _                            ')
+    log.info('  / \   __| | __ _ _ __ | |_ ___ _ __                 ')
+    log.info('  / _ \ / _` |/ _` | \'_ \| __/ _ \ \'__|             ')
+    log.info(' / ___ \ (_| | (_| | |_) | ||  __/ |                  ')
+    log.info('/_/   \_\__,_|\__,_| .__/ \__\___|_|                  ')
+    log.info('                   |_|                                ')
+    log.info('(to stop: press Ctrl-C)')
+
+
+@implementer(IComponent)
+class Main(object):
+
+    def __init__(self):
+
+        self.args = args = parse_args()
+        self.config = load_config(args)
+
+        verbosity_adjust = (args.verbose or 0) - (args.quiet or 0)
+        self.log = setup_logging(self.config.get('logging', {}),
+                                 args.instance_id,
+                                 verbosity_adjust=verbosity_adjust)
+        self.log.info('container-number-extractor',
+                      regex=args.container_name_regex)
+
+        self.ponsim_olt_adapter_version = self.get_version()
+        self.log.info('Ponsim-OLT-Adapter-Version',
+                      version=self.ponsim_olt_adapter_version)
+
+        if not args.no_banner:
+            print_banner(self.log)
+
+        # Create a unique instance id using the passed-in instance id and
+        # UTC timestamp
+        current_time = arrow.utcnow().timestamp
+        self.instance_id = self.args.instance_id + '_' + str(current_time)
+
+        self.core_topic = args.core_topic
+        self.listening_topic = args.name
+        self.startup_components()
+
+        if not args.no_heartbeat:
+            self.start_heartbeat()
+            self.start_kafka_cluster_heartbeat(self.instance_id)
+
+    def get_version(self):
+        path = defs['version_file']
+        if not path.startswith('/'):
+            dir = os.path.dirname(os.path.abspath(__file__))
+            path = os.path.join(dir, path)
+
+        path = os.path.abspath(path)
+        with open(path, 'r') as version_file:
+            v = version_file.read()
+
+        # Use Version to validate the version string - an exception will be
+        # raised if the version is invalid
+        Version(v)
+
+        return v
+
+    def start(self):
+        self.start_reactor()  # will not return except Keyboard interrupt
+
+    def stop(self):
+        pass
+
+    def get_args(self):
+        """Allow access to command line args"""
+        return self.args
+
+    def get_config(self):
+        """Allow access to content of config file"""
+        return self.config
+
+    def _get_adapter_config(self):
+        cfg = AdapterConfig()
+        return cfg
+
+    @inlineCallbacks
+    def startup_components(self):
+        try:
+            self.log.info('starting-internal-components',
+                          consul=self.args.consul,
+                          etcd=self.args.etcd)
+
+            registry.register('main', self)
+
+            # Update the logger to output the vcore id.
+            self.log = update_logging(instance_id=self.instance_id,
+                                      vcore_id=None)
+
+            yield registry.register(
+                'kafka_cluster_proxy',
+                KafkaProxy(
+                    self.args.consul,
+                    self.args.kafka_cluster,
+                    config=self.config.get('kafka-cluster-proxy', {})
+                )
+            ).start()
+
+            config = self._get_adapter_config()
+
+            self.core_proxy = CoreProxy(
+                kafka_proxy=None,
+                core_topic=self.core_topic,
+                my_listening_topic=self.listening_topic)
+
+            ponsim_olt_adapter = PonSimOltAdapter(
+                adapter_agent=self.core_proxy, config=config)
+            ponsim_request_handler = AdapterRequestFacade(
+                adapter=ponsim_olt_adapter)
+
+            yield registry.register(
+                'kafka_adapter_proxy',
+                IKafkaMessagingProxy(
+                    kafka_host_port=self.args.kafka_adapter,
+                    # TODO: Add KV Store object reference
+                    kv_store=self.args.backend,
+                    default_topic=self.args.name,
+                    # Requests from the Core are dispatched to this handler
+                    target_cls=ponsim_request_handler
+                )
+            ).start()
+
+            self.core_proxy.kafka_proxy = get_messaging_proxy()
+
+            # retry forever
+            yield self._register_with_core(-1)
+
+            self.log.info('started-internal-services')
+
+        except Exception as e:
+            self.log.exception('Failure-to-start-all-components', e=e)
+
+    @inlineCallbacks
+    def shutdown_components(self):
+        """Execute before the reactor is shut down"""
+        self.log.info('exiting-on-keyboard-interrupt')
+        for component in reversed(registry.iterate()):
+            yield component.stop()
+
+        import threading
+        self.log.info('THREADS:')
+        main_thread = threading.current_thread()
+        for t in threading.enumerate():
+            if t is main_thread:
+                continue
+            if not t.isDaemon():
+                continue
+            self.log.info('joining thread {} {}'.format(
+                t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
+            t.join()
+
+    def start_reactor(self):
+        from twisted.internet import reactor
+        reactor.callWhenRunning(
+            lambda: self.log.info('twisted-reactor-started'))
+        reactor.addSystemEventTrigger('before', 'shutdown',
+                                      self.shutdown_components)
+        reactor.run()
+
+    @inlineCallbacks
+    def _register_with_core(self, retries):
+        # Send registration to Core with adapter specs
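+        # retries < 0 retries forever; retries == 0 gives up after the
+        # first registration timeout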
+        adapter = Adapter()
+        adapter.id = self.args.name
+        adapter.vendor = self.args.vendor
+        adapter.version = self.ponsim_olt_adapter_version
+        while True:
+            try:
+                resp = yield self.core_proxy.register(adapter)
+                self.log.info('registration-response', response=resp)
+                returnValue(resp)
+            except TimeOutError as e:
+                self.log.warn("timeout-when-registering-with-core", e=e)
+                if retries == 0:
+                    self.log.exception("no-more-retries", e=e)
+                    raise
+                else:
+                    retries = retries if retries < 0 else retries - 1
+                    yield asleep(defs['retry_interval'])
+            except Exception as e:
+                self.log.exception("failed-registration", e=e)
+                raise
+
+    def start_heartbeat(self):
+
+        t0 = time.time()
+        t0s = time.ctime(t0)
+
+        def heartbeat():
+            self.log.debug('heartbeat', status='up', since=t0s,
+                           uptime=time.time() - t0)
+
+        lc = LoopingCall(heartbeat)
+        lc.start(10)
+
+    # Temporary function to send a heartbeat message to the external kafka
+    # broker
+    def start_kafka_cluster_heartbeat(self, instance_id):
+        # For the heartbeat we send a JSON-serialized dict to the dedicated
+        # heartbeat topic (defs['heartbeat_topic']).
+        message = dict(
+            type='heartbeat',
+            adapter=self.args.name,
+            instance=instance_id,
+            ip=get_my_primary_local_ipv4()
+        )
+        topic = defs['heartbeat_topic']
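+        # An emitted message looks roughly like this (values illustrative):
+        #   {"type": "heartbeat", "adapter": "ponsim_olt",
+        #    "instance": "ponsim_olt_1532008800", "ip": "10.0.2.15",
+        #    "ts": 1532008800, "uptime": 42.7}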
+
+        def send_msg(start_time):
+            try:
+                kafka_cluster_proxy = get_kafka_proxy()
+                if kafka_cluster_proxy and not kafka_cluster_proxy.is_faulty():
+                    # self.log.debug('kafka-proxy-available')
+                    message['ts'] = arrow.utcnow().timestamp
+                    message['uptime'] = time.time() - start_time
+                    # self.log.debug('start-kafka-heartbeat')
+                    kafka_cluster_proxy.send_message(topic, dumps(message))
+                else:
+                    self.log.error('kafka-proxy-unavailable')
+            except Exception as e:
+                self.log.exception('failed-sending-message-heartbeat', e=e)
+
+        try:
+            t0 = time.time()
+            lc = LoopingCall(send_msg, t0)
+            lc.start(10)
+        except Exception as e:
+            self.log.exception('failed-kafka-heartbeat', e=e)
+
+
+if __name__ == '__main__':
+    Main().start()
diff --git a/adapters/ponsim_olt/ponsim_olt.py b/adapters/ponsim_olt/ponsim_olt.py
new file mode 100644
index 0000000..5e096b4
--- /dev/null
+++ b/adapters/ponsim_olt/ponsim_olt.py
@@ -0,0 +1,755 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Fully simulated OLT adapter.
+"""
+from uuid import uuid4
+
+import arrow
+import adapters.common.openflow.utils as fd
+import grpc
+import structlog
+from scapy.layers.l2 import Ether, Dot1Q
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks
+from grpc._channel import _Rendezvous
+
+from adapters.common.frameio.frameio import BpfProgramFilter, hexify
+from adapters.common.utils.asleep import asleep
+from twisted.internet.task import LoopingCall
+from adapters.iadapter import OltAdapter
+from adapters.protos import third_party
+from adapters.protos import openflow_13_pb2 as ofp
+from adapters.protos import ponsim_pb2
+from adapters.protos.common_pb2 import OperStatus, ConnectStatus, AdminState
+from adapters.protos.device_pb2 import Port, PmConfig, PmConfigs
+from adapters.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
+from google.protobuf.empty_pb2 import Empty
+from adapters.protos.logical_device_pb2 import LogicalPort, LogicalDevice
+from adapters.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+    OFPPF_1GB_FD, \
+    OFPC_GROUP_STATS, OFPC_PORT_STATS, OFPC_TABLE_STATS, OFPC_FLOW_STATS, \
+    ofp_switch_features, ofp_desc
+from adapters.protos.openflow_13_pb2 import ofp_port
+from adapters.protos.ponsim_pb2 import FlowTable, PonSimFrame
+from adapters.protos.core_adapter_pb2 import SwitchCapability, PortCapability
+from adapters.common.utils.registry import registry
+
+_ = third_party
+log = structlog.get_logger()
+
+PACKET_IN_VLAN = 4000
+is_inband_frame = BpfProgramFilter('(ether[14:2] & 0xfff) = 0x{:03x}'.format(
+    PACKET_IN_VLAN))
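+# ether[14:2] is the 16-bit 802.1Q TCI field (bytes 14-15 of the frame);
+# masking with 0xfff keeps the 12-bit VLAN ID, so this BPF filter matches
+# frames tagged with PACKET_IN_VLAN.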
+
+def mac_str_to_tuple(mac):
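+    # e.g. '00:0c:e2:31:40:00' -> (0, 12, 226, 49, 64, 0), the tuple form
+    # expected by the ofp_port hw_addr field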
+    return tuple(int(d, 16) for d in mac.split(':'))
+
+class AdapterPmMetrics:
+    def __init__(self, device):
+        self.pm_names = {'tx_64_pkts', 'tx_65_127_pkts', 'tx_128_255_pkts',
+                         'tx_256_511_pkts', 'tx_512_1023_pkts',
+                         'tx_1024_1518_pkts', 'tx_1519_9k_pkts',
+                         'rx_64_pkts', 'rx_65_127_pkts',
+                         'rx_128_255_pkts', 'rx_256_511_pkts',
+                         'rx_512_1023_pkts', 'rx_1024_1518_pkts',
+                         'rx_1519_9k_pkts'}
+        self.device = device
+        self.id = device.id
+        self.name = 'ponsim_olt'
+        self.default_freq = 150
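+        # Collection frequency in 100ms units; the LoopingCall below runs
+        # every default_freq / 10 seconds (150 -> every 15s)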
+        self.grouped = False
+        self.freq_override = False
+        self.pon_metrics_config = dict()
+        self.nni_metrics_config = dict()
+        self.lc = None
+        for m in self.pm_names:
+            self.pon_metrics_config[m] = PmConfig(name=m,
+                                                  type=PmConfig.COUNTER,
+                                                  enabled=True)
+            self.nni_metrics_config[m] = PmConfig(name=m,
+                                                  type=PmConfig.COUNTER,
+                                                  enabled=True)
+
+    def update(self, pm_config):
+        if self.default_freq != pm_config.default_freq:
+            # Update the callback to the new frequency.
+            self.default_freq = pm_config.default_freq
+            if self.lc is not None:
+                self.lc.stop()
+                self.lc.start(interval=self.default_freq / 10)
+        for m in pm_config.metrics:
+            self.pon_metrics_config[m.name].enabled = m.enabled
+            self.nni_metrics_config[m.name].enabled = m.enabled
+
+    def make_proto(self):
+        pm_config = PmConfigs(
+            id=self.id,
+            default_freq=self.default_freq,
+            grouped=False,
+            freq_override=False)
+        for m in sorted(self.pon_metrics_config):
+            pm = self.pon_metrics_config[m]  # either will do; they're the same
+            pm_config.metrics.extend([PmConfig(name=pm.name,
+                                               type=pm.type,
+                                               enabled=pm.enabled)])
+        return pm_config
+
+    def collect_port_metrics(self, channel):
+        rtrn_port_metrics = dict()
+        stub = ponsim_pb2.PonSimStub(channel)
+        stats = stub.GetStats(Empty())
+        rtrn_port_metrics['pon'] = self.extract_pon_metrics(stats)
+        rtrn_port_metrics['nni'] = self.extract_nni_metrics(stats)
+        return rtrn_port_metrics
+
+    def extract_pon_metrics(self, stats):
+        rtrn_pon_metrics = dict()
+        for m in stats.metrics:
+            if m.port_name == "pon":
+                for p in m.packets:
+                    if self.pon_metrics_config[p.name].enabled:
+                        rtrn_pon_metrics[p.name] = p.value
+                return rtrn_pon_metrics
+
+    def extract_nni_metrics(self, stats):
+        rtrn_nni_metrics = dict()
+        for m in stats.metrics:
+            if m.port_name == "nni":
+                for p in m.packets:
+                    if self.nni_metrics_config[p.name].enabled:
+                        rtrn_nni_metrics[p.name] = p.value
+                return rtrn_nni_metrics
+
+    def start_collector(self, callback):
+        log.info("starting-pm-collection", device_name=self.name,
+                 device_id=self.device.id)
+        prefix = 'voltha.{}.{}'.format(self.name, self.device.id)
+        self.lc = LoopingCall(callback, self.device.id, prefix)
+        self.lc.start(interval=self.default_freq / 10)
+
+    def stop_collector(self):
+        log.info("stopping-pm-collection", device_name=self.name,
+                 device_id=self.device.id)
+        self.lc.stop()
+
+
+class AdapterAlarms:
+    def __init__(self, adapter, device):
+        self.adapter = adapter
+        self.device = device
+        self.lc = None
+
+    def send_alarm(self, context_data, alarm_data):
+        try:
+            current_context = {}
+            for key, value in context_data.__dict__.items():
+                current_context[key] = str(value)
+
+            alarm_event = self.adapter.adapter_agent.create_alarm(
+                resource_id=self.device.id,
+                description="{}.{} - {}".format(
+                    self.adapter.name, self.device.id,
+                    alarm_data['description'])
+                if 'description' in alarm_data else None,
+                type=alarm_data.get('type'),
+                category=alarm_data.get('category'),
+                severity=alarm_data.get('severity'),
+                state=alarm_data.get('state'),
+                raised_ts=alarm_data.get('ts', 0),
+                context=current_context
+            )
+
+            self.adapter.adapter_agent.submit_alarm(self.device.id,
+                                                    alarm_event)
+
+        except Exception as e:
+            log.exception('failed-to-send-alarm', e=e)
+
+
+class PonSimOltAdapter(OltAdapter):
+    def __init__(self, adapter_agent, config):
+        super(PonSimOltAdapter, self).__init__(adapter_agent=adapter_agent,
+                                               config=config,
+                                               device_handler_class=PonSimOltHandler,
+                                               name='ponsim_olt',
+                                               vendor='Voltha project',
+                                               version='0.4',
+                                               device_type='ponsim_olt',
+                                               accepts_bulk_flow_update=True,
+                                               accepts_add_remove_flow_updates=False)
+
+    def update_pm_config(self, device, pm_config):
+        log.info("adapter-update-pm-config", device=device,
+                 pm_config=pm_config)
+        handler = self.devices_handlers[device.id]
+        handler.update_pm_config(device, pm_config)
+
+
+
+class PonSimOltHandler(object):
+    def __init__(self, adapter, device_id):
+        self.adapter = adapter
+        self.adapter_agent = adapter.adapter_agent
+        self.device_id = device_id
+        self.log = structlog.get_logger(device_id=device_id)
+        self.channel = None
+        self.io_port = None
+        self.logical_device_id = None
+        self.nni_port = None
+        self.ofp_port_no = None
+        self.interface = registry('main').get_args().interface
+        self.pm_metrics = None
+        self.alarms = None
+        self.frames = None
+
+    @inlineCallbacks
+    def get_channel(self):
+        if self.channel is None:
+            try:
+                device = yield self.adapter_agent.get_device(self.device_id)
+                self.log.info('device-info', device=device, host_port=device.host_and_port)
+                self.channel = grpc.insecure_channel(device.host_and_port)
+            except Exception as e:
+                log.exception("ponsim-connection-failure", e=e)
+
+        # returnValue(self.channel)
+
+    def close_channel(self):
+        if self.channel is None:
+            self.log.info('grpc-channel-already-closed')
+            return
+        else:
+            if self.frames is not None:
+                self.frames.cancel()
+                self.frames = None
+                self.log.info('cancelled-grpc-frame-stream')
+
+            self.channel.unsubscribe(lambda *args: None)
+            self.channel = None
+
+            self.log.info('grpc-channel-closed')
+
+    def _get_nni_port(self):
+        ports = self.adapter_agent.get_ports(self.device_id, Port.ETHERNET_NNI)
+        if ports:
+            # For now, we use only one NNI port
+            return ports[0]
+
+    @inlineCallbacks
+    def activate(self, device):
+        try:
+            self.log.info('activating')
+
+            if not device.host_and_port:
+                device.oper_status = OperStatus.FAILED
+                device.reason = 'No host_and_port field provided'
+                self.adapter_agent.device_update(device)
+                return
+
+            yield self.get_channel()
+            stub = ponsim_pb2.PonSimStub(self.channel)
+            info = stub.GetDeviceInfo(Empty())
+            log.info('got-info', info=info, device_id=device.id)
+            self.ofp_port_no = info.nni_port
+
+            device.root = True
+            device.vendor = 'ponsim'
+            device.model = 'n/a'
+            device.serial_number = device.host_and_port
+            device.connect_status = ConnectStatus.REACHABLE
+            yield self.adapter_agent.device_update(device)
+
+            # Now set the initial PM configuration for this device
+            self.pm_metrics = AdapterPmMetrics(device)
+            pm_config = self.pm_metrics.make_proto()
+            log.info("initial-pm-config", pm_config=pm_config)
+            self.adapter_agent.device_pm_config_update(pm_config, init=True)
+
+            # Setup alarm handler
+            self.alarms = AdapterAlarms(self.adapter, device)
+
+            nni_port = Port(
+                port_no=info.nni_port,
+                label='NNI facing Ethernet port',
+                type=Port.ETHERNET_NNI,
+                admin_state=AdminState.ENABLED,
+                oper_status=OperStatus.ACTIVE
+            )
+            self.nni_port = nni_port
+            yield self.adapter_agent.port_created(device.id, nni_port)
+            yield self.adapter_agent.port_created(device.id, Port(
+                port_no=1,
+                label='PON port',
+                type=Port.PON_OLT,
+                admin_state=AdminState.ENABLED,
+                oper_status=OperStatus.ACTIVE
+            ))
+            yield self.adapter_agent.device_state_update(device.id, oper_status=OperStatus.ACTIVE)
+
+            # register ONUS
+            self.log.info('onu-found', onus=info.onus, len=len(info.onus))
+            for onu in info.onus:
+                vlan_id = onu.uni_port
+                yield self.adapter_agent.child_device_detected(
+                    parent_device_id=device.id,
+                    parent_port_no=1,
+                    child_device_type='ponsim_onu',
+                    channel_id=vlan_id,
+                )
+
+            self.log.info('starting-frame-grpc-stream')
+            reactor.callInThread(self.rcv_grpc)
+            self.log.info('started-frame-grpc-stream')
+
+            # TODO
+            # Start collecting stats from the device after a brief pause
+            # self.start_kpi_collection(device.id)
+        except Exception as e:
+            log.exception("Exception-activating", e=e)
+
+
+    def get_ofp_device_info(self, device):
+        return SwitchCapability(
+            desc=ofp_desc(
+                hw_desc='ponsim pon',
+                sw_desc='ponsim pon',
+                serial_num=device.serial_number,
+                dp_desc='n/a'
+            ),
+            switch_features=ofp_switch_features(
+                n_buffers=256,  # TODO fake for now
+                n_tables=2,  # TODO ditto
+                capabilities=(  # TODO and ditto
+                    OFPC_FLOW_STATS
+                    | OFPC_TABLE_STATS
+                    | OFPC_PORT_STATS
+                    | OFPC_GROUP_STATS
+                )
+            )
+        )
+
+    def get_ofp_port_info(self, device, port_no):
+        # Since the adapter created the device port then it has the reference of the port to
+        # return the capability.   TODO:  Do a lookup on the NNI port number and return the
+        # appropriate attributes
+        self.log.info('get_ofp_port_info', port_no=port_no, info=self.ofp_port_no, device_id=device.id)
+        cap = OFPPF_1GB_FD | OFPPF_FIBER
+        return PortCapability(
+            port = LogicalPort (
+                id='nni',
+                ofp_port=ofp_port(
+                    port_no=port_no,
+                    hw_addr=mac_str_to_tuple(
+                    '00:00:00:00:00:%02x' % self.ofp_port_no),
+                    name='nni',
+                    config=0,
+                    state=OFPPS_LIVE,
+                    curr=cap,
+                    advertised=cap,
+                    peer=cap,
+                    curr_speed=OFPPF_1GB_FD,
+                    max_speed=OFPPF_1GB_FD
+                )
+            )
+        )
+
+    def reconcile(self, device):
+        self.log.info('reconciling-OLT-device-starts')
+
+        if not device.host_and_port:
+            device.oper_status = OperStatus.FAILED
+            device.reason = 'No host_and_port field provided'
+            self.adapter_agent.device_update(device)
+            return
+
+        try:
+            stub = ponsim_pb2.PonSimStub(self.get_channel())
+            info = stub.GetDeviceInfo(Empty())
+            log.info('got-info', info=info)
+            # TODO: Verify we are connected to the same device we are
+            # reconciling - not much data in ponsim to differentiate at the
+            # time
+            device.oper_status = OperStatus.ACTIVE
+            self.adapter_agent.device_update(device)
+            self.ofp_port_no = info.nni_port
+            self.nni_port = self._get_nni_port()
+        except Exception as e:
+            log.exception('device-unreachable', e=e)
+            device.connect_status = ConnectStatus.UNREACHABLE
+            device.oper_status = OperStatus.UNKNOWN
+            self.adapter_agent.device_update(device)
+            return
+
+        # Now set the initial PM configuration for this device
+        self.pm_metrics = AdapterPmMetrics(device)
+        pm_config = self.pm_metrics.make_proto()
+        log.info("initial-pm-config", pm_config=pm_config)
+        self.adapter_agent.device_update_pm_config(pm_config, init=True)
+
+        # Setup alarm handler
+        self.alarms = AdapterAlarms(self.adapter, device)
+
+        # TODO: Is there anything required to verify nni and PON ports
+
+        # Set the logical device id
+        device = self.adapter_agent.get_device(device.id)
+        if device.parent_id:
+            self.logical_device_id = device.parent_id
+            self.adapter_agent.reconcile_logical_device(device.parent_id)
+        else:
+            self.log.info('no-logical-device-set')
+
+        # Reconcile child devices
+        self.adapter_agent.reconcile_child_devices(device.id)
+
+        reactor.callInThread(self.rcv_grpc)
+
+        # Start collecting stats from the device after a brief pause
+        self.start_kpi_collection(device.id)
+
+        self.log.info('reconciling-OLT-device-ends')
+
+    @inlineCallbacks
+    def rcv_grpc(self):
+        """
+        This call establishes a GRPC stream to receive frames.
+        """
+        yield self.get_channel()
+        stub = ponsim_pb2.PonSimStub(self.channel)
+        # stub = ponsim_pb2.PonSimStub(self.get_channel())
+
+        # Attempt to establish a grpc stream with the remote ponsim service
+        self.frames = stub.ReceiveFrames(Empty())
+
+        self.log.info('start-receiving-grpc-frames')
+
+        try:
+            for frame in self.frames:
+                self.log.info('received-grpc-frame',
+                              frame_len=len(frame.payload))
+                self._rcv_frame(frame.payload)
+
+        except _Rendezvous as e:
+            log.warn('grpc-connection-lost', message=e.message)
+
+        self.log.info('stopped-receiving-grpc-frames')
+
+
+    # VOLTHA's flow decomposition removes the information about which flows
+    # are trap flows where traffic should be forwarded to the controller.
+    # We'll go through the flows and change the output port of flows that we
+    # know to be trap flows to the OF CONTROLLER port.
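+    # In practice the trap flows recognized below are EAPOL (eth_type
+    # 0x888e), IGMP (ip_proto 2) and UDP/DHCP (ip_proto 17).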
+    def update_flow_table(self, flows):
+        stub = ponsim_pb2.PonSimStub(self.get_channel())
+        self.log.info('pushing-olt-flow-table')
+        for flow in flows:
+            classifier_info = {}
+            for field in fd.get_ofb_fields(flow):
+                if field.type == fd.ETH_TYPE:
+                    classifier_info['eth_type'] = field.eth_type
+                    self.log.debug('field-type-eth-type',
+                                   eth_type=classifier_info['eth_type'])
+                elif field.type == fd.IP_PROTO:
+                    classifier_info['ip_proto'] = field.ip_proto
+                    self.log.debug('field-type-ip-proto',
+                                   ip_proto=classifier_info['ip_proto'])
+            if ('ip_proto' in classifier_info and (
+                    classifier_info['ip_proto'] == 17 or
+                    classifier_info['ip_proto'] == 2)) or (
+                    'eth_type' in classifier_info and
+                    classifier_info['eth_type'] == 0x888e):
+                for action in fd.get_actions(flow):
+                    if action.type == ofp.OFPAT_OUTPUT:
+                        action.output.port = ofp.OFPP_CONTROLLER
+            self.log.info('out_port', out_port=fd.get_out_port(flow))
+
+        stub.UpdateFlowTable(FlowTable(
+            port=0,
+            flows=flows
+        ))
+        self.log.info('success')
+
+    def remove_from_flow_table(self, flows):
+        self.log.debug('remove-from-flow-table', flows=flows)
+        # TODO: Update PONSIM code to accept incremental flow changes
+        # Once completed, the accepts_add_remove_flow_updates for this
+        # device type can be set to True
+
+    def add_to_flow_table(self, flows):
+        self.log.debug('add-to-flow-table', flows=flows)
+        # TODO: Update PONSIM code to accept incremental flow changes
+        # Once completed, the accepts_add_remove_flow_updates for this
+        # device type can be set to True
+
+    def update_pm_config(self, device, pm_config):
+        log.info("handler-update-pm-config", device=device,
+                 pm_config=pm_config)
+        self.pm_metrics.update(pm_config)
+
+    def send_proxied_message(self, proxy_address, msg):
+        self.log.info('sending-proxied-message')
+        if isinstance(msg, FlowTable):
+            stub = ponsim_pb2.PonSimStub(self.get_channel())
+            self.log.info('pushing-onu-flow-table', port=msg.port)
+            res = stub.UpdateFlowTable(msg)
+            self.adapter_agent.receive_proxied_message(proxy_address, res)
+
+    def packet_out(self, egress_port, msg):
+        self.log.info('sending-packet-out', egress_port=egress_port,
+                      msg=hexify(msg))
+        pkt = Ether(msg)
+        out_pkt = pkt
+        if egress_port != self.nni_port.port_no:
+            # don't do the vlan manipulation for the NNI port, vlans are already correct
+            out_pkt = (
+                    Ether(src=pkt.src, dst=pkt.dst) /
+                    Dot1Q(vlan=egress_port, type=pkt.type) /
+                    pkt.payload
+            )
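+            # The logical egress port number doubles as the VLAN tag: each
+            # ONU was detected with channel_id == its uni_port (see
+            # activate()), so the tag identifies the target ONU downstream.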
+
+        # TODO need better way of mapping logical ports to PON ports
+        out_port = self.nni_port.port_no if egress_port == self.nni_port.port_no else 1
+
+        # send over grpc stream
+        stub = ponsim_pb2.PonSimStub(self.get_channel())
+        frame = PonSimFrame(id=self.device_id, payload=str(out_pkt), out_port=out_port)
+        stub.SendFrame(frame)
+
+
+    @inlineCallbacks
+    def reboot(self):
+        self.log.info('rebooting', device_id=self.device_id)
+
+        # Update the operational status to ACTIVATING and connect status to
+        # UNREACHABLE
+        device = self.adapter_agent.get_device(self.device_id)
+        previous_oper_status = device.oper_status
+        previous_conn_status = device.connect_status
+        device.oper_status = OperStatus.ACTIVATING
+        device.connect_status = ConnectStatus.UNREACHABLE
+        self.adapter_agent.device_update(device)
+
+        # Update the child devices connect state to UNREACHABLE
+        self.adapter_agent.update_child_devices_state(self.device_id,
+                                                      connect_status=ConnectStatus.UNREACHABLE)
+
+        # Sleep 10 secs, simulating a reboot
+        # TODO: send alert and clear alert after the reboot
+        yield asleep(10)
+
+        # Change the operational status back to its previous state.  With a
+        # real OLT the operational state should be the state the device is
+        # after a reboot.
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+        device.oper_status = previous_oper_status
+        device.connect_status = previous_conn_status
+        self.adapter_agent.device_update(device)
+
+        # Update the child devices connect state to REACHABLE
+        self.adapter_agent.update_child_devices_state(self.device_id,
+                                                      connect_status=ConnectStatus.REACHABLE)
+
+        self.log.info('rebooted', device_id=self.device_id)
+
+    def self_test_device(self, device):
+        """
+        This is called to self-test a device based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: The result of the self test
+        """
+        log.info('self-test-device', device=device.id)
+        raise NotImplementedError()
+
+    def disable(self):
+        self.log.info('disabling', device_id=self.device_id)
+
+        self.stop_kpi_collection()
+
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+
+        # Update the operational status to UNKNOWN
+        device.oper_status = OperStatus.UNKNOWN
+        device.connect_status = ConnectStatus.UNREACHABLE
+        self.adapter_agent.device_update(device)
+
+        # Remove the logical device
+        logical_device = self.adapter_agent.get_logical_device(
+            self.logical_device_id)
+        self.adapter_agent.delete_logical_device(logical_device)
+
+        # Disable all child devices first
+        self.adapter_agent.update_child_devices_state(self.device_id,
+                                                      admin_state=AdminState.DISABLED)
+
+        # Remove the peer references from this device
+        self.adapter_agent.delete_all_peer_references(self.device_id)
+
+        # Set all ports to disabled
+        self.adapter_agent.disable_all_ports(self.device_id)
+
+        self.close_channel()
+        self.log.info('disabled-grpc-channel')
+
+        # Update the logical device mapping
+        if self.logical_device_id in \
+                self.adapter.logical_device_id_to_root_device_id:
+            del self.adapter.logical_device_id_to_root_device_id[
+                self.logical_device_id]
+
+        # TODO:
+        # 1) Remove all flows from the device
+        # 2) Remove the device from ponsim
+
+        self.log.info('disabled', device_id=device.id)
+
+    def reenable(self):
+        self.log.info('re-enabling', device_id=self.device_id)
+
+        # Get the latest device reference
+        device = self.adapter_agent.get_device(self.device_id)
+
+        # Set the ofp_port_no and nni_port in case we bypassed the reconcile
+        # process if the device was in DISABLED state on voltha restart
+        if not self.ofp_port_no and not self.nni_port:
+            stub = ponsim_pb2.PonSimStub(self.get_channel())
+            info = stub.GetDeviceInfo(Empty())
+            log.info('got-info', info=info)
+            self.ofp_port_no = info.nni_port
+            self.nni_port = self._get_nni_port()
+
+        # Update the connect status to REACHABLE
+        device.connect_status = ConnectStatus.REACHABLE
+        self.adapter_agent.device_update(device)
+
+        # Set all ports to enabled
+        self.adapter_agent.enable_all_ports(self.device_id)
+
+        ld = LogicalDevice(
+            # not setting id and datapath_id lets the adapter agent pick the id
+            desc=ofp_desc(
+                hw_desc='simulated pon',
+                sw_desc='simulated pon',
+                serial_num=uuid4().hex,
+                dp_desc='n/a'
+            ),
+            switch_features=ofp_switch_features(
+                n_buffers=256,  # TODO fake for now
+                n_tables=2,  # TODO ditto
+                capabilities=(  # TODO and ditto
+                        OFPC_FLOW_STATS
+                        | OFPC_TABLE_STATS
+                        | OFPC_PORT_STATS
+                        | OFPC_GROUP_STATS
+                )
+            ),
+            root_device_id=device.id
+        )
+        mac_address = "AA:BB:CC:DD:EE:FF"
+        ld_initialized = self.adapter_agent.create_logical_device(ld,
+                                                                  dpid=mac_address)
+        cap = OFPPF_1GB_FD | OFPPF_FIBER
+        self.adapter_agent.add_logical_port(ld_initialized.id, LogicalPort(
+            id='nni',
+            ofp_port=ofp_port(
+                port_no=self.ofp_port_no,
+                hw_addr=mac_str_to_tuple(
+                    '00:00:00:00:00:%02x' % self.ofp_port_no),
+                name='nni',
+                config=0,
+                state=OFPPS_LIVE,
+                curr=cap,
+                advertised=cap,
+                peer=cap,
+                curr_speed=OFPPF_1GB_FD,
+                max_speed=OFPPF_1GB_FD
+            ),
+            device_id=device.id,
+            device_port_no=self.nni_port.port_no,
+            root_port=True
+        ))
+
+        device = self.adapter_agent.get_device(device.id)
+        device.parent_id = ld_initialized.id
+        device.oper_status = OperStatus.ACTIVE
+        self.adapter_agent.device_update(device)
+        self.logical_device_id = ld_initialized.id
+
+        # Reenable all child devices
+        self.adapter_agent.update_child_devices_state(device.id,
+                                                      admin_state=AdminState.ENABLED)
+
+        # establish frame grpc-stream
+        reactor.callInThread(self.rcv_grpc)
+
+        self.start_kpi_collection(device.id)
+
+        self.log.info('re-enabled', device_id=device.id)
+
+    def delete(self):
+        self.log.info('deleting', device_id=self.device_id)
+
+        # Remove all child devices
+        self.adapter_agent.delete_all_child_devices(self.device_id)
+
+        self.close_channel()
+        self.log.info('disabled-grpc-channel')
+
+        # TODO:
+        # 1) Remove all flows from the device
+        # 2) Remove the device from ponsim
+
+        self.log.info('deleted', device_id=self.device_id)
+
+    def start_kpi_collection(self, device_id):
+
+        def _collect(device_id, prefix):
+
+            try:
+                # Step 1: gather metrics from device
+                port_metrics = \
+                    self.pm_metrics.collect_port_metrics(self.get_channel())
+
+                # Step 2: prepare the KpiEvent for submission
+                # we can time-stamp them here (or could use a time derived
+                # from the OLT)
+                ts = arrow.utcnow().timestamp
+                kpi_event = KpiEvent(
+                    type=KpiEventType.slice,
+                    ts=ts,
+                    prefixes={
+                        # OLT NNI port
+                        prefix + '.nni': MetricValuePairs(
+                            metrics=port_metrics['nni']),
+                        # OLT PON port
+                        prefix + '.pon': MetricValuePairs(
+                            metrics=port_metrics['pon'])
+                    }
+                )
+
+                # Step 3: submit
+                self.adapter_agent.submit_kpis(kpi_event)
+
+            except Exception as e:
+                log.exception('failed-to-submit-kpis', e=e)
+
+        self.pm_metrics.start_collector(_collect)
+
+    def stop_kpi_collection(self):
+        self.pm_metrics.stop_collector()
diff --git a/adapters/ponsim_olt/ponsim_olt.yml b/adapters/ponsim_olt/ponsim_olt.yml
new file mode 100644
index 0000000..fdb647a
--- /dev/null
+++ b/adapters/ponsim_olt/ponsim_olt.yml
@@ -0,0 +1,52 @@
+logging:
+    version: 1
+
+    formatters:
+      brief:
+        format: '%(message)s'
+      default:
+        format: '%(asctime)s.%(msecs)03d %(levelname)-8s %(threadName)s %(module)s.%(funcName)s %(message)s'
+        datefmt: '%Y%m%dT%H%M%S'
+
+    handlers:
+        console:
+            class : logging.StreamHandler
+            level: DEBUG
+            formatter: default
+            stream: ext://sys.stdout
+        localRotatingFile:
+            class: logging.handlers.RotatingFileHandler
+            filename: ponsim_olt.log
+            formatter: default
+            maxBytes: 2097152
+            backupCount: 10
+            level: DEBUG
+        null:
+            class: logging.NullHandler
+
+    loggers:
+        amqp:
+            handlers: [null]
+            propagate: False
+        conf:
+            propagate: False
+        '': # root logger
+            handlers: [console, localRotatingFile]
+            level: DEBUG # this can be bumped up/down by -q and -v command line
+                        # options
+            propagate: False
+
+
+kafka-cluster-proxy:
+    event_bus_publisher:
+        topic_mappings:
+            'model-change-events':
+                kafka_topic: 'voltha.events'
+                filters:     [null]
+            'alarms':
+                kafka_topic: 'voltha.alarms'
+                filters:     [null]
+            'kpis':
+                kafka_topic: 'voltha.kpis'
+                filters:     [null]
+