Initial commit moving openolt adapter from voltha-go to the new repo.
This version works with ponsim rather than openolt; this is temporary,
and it is currently being fixed to work with openolt.

Change-Id: I34a800c98f050140b367e2d474b7aa8b79f34b9a
Signed-off-by: William Kurkian <wkurkian@cisco.com>
diff --git a/python/adapters/openolt/VERSION b/python/adapters/openolt/VERSION
new file mode 100644
index 0000000..c0ab82c
--- /dev/null
+++ b/python/adapters/openolt/VERSION
@@ -0,0 +1 @@
+0.0.1-dev
diff --git a/python/adapters/openolt/__init__.py b/python/adapters/openolt/__init__.py
new file mode 100644
index 0000000..4a82628
--- /dev/null
+++ b/python/adapters/openolt/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2018-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/python/adapters/openolt/main.py b/python/adapters/openolt/main.py
new file mode 100755
index 0000000..273ff13
--- /dev/null
+++ b/python/adapters/openolt/main.py
@@ -0,0 +1,500 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""Ponsim OLT Adapter main entry point"""
+
+import argparse
+import os
+import time
+
+import arrow
+import yaml
+from packaging.version import Version
+from simplejson import dumps
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.task import LoopingCall
+from zope.interface import implementer
+
+from python.common.structlog_setup import setup_logging, update_logging
+from python.common.utils.asleep import asleep
+from python.common.utils.deferred_utils import TimeOutError
+from python.common.utils.dockerhelpers import get_my_containers_name
+from python.common.utils.nethelpers import get_my_primary_local_ipv4, \
+    get_my_primary_interface
+from python.common.utils.registry import registry, IComponent
+from python.adapters.kafka.adapter_proxy import AdapterProxy
+from python.adapters.kafka.adapter_request_facade import AdapterRequestFacade
+from python.adapters.kafka.core_proxy import CoreProxy
+from python.adapters.kafka.kafka_inter_container_library import IKafkaMessagingProxy, \
+    get_messaging_proxy
+from python.adapters.kafka.kafka_proxy import KafkaProxy, get_kafka_proxy
+from openolt import OpenoltAdapter
+from python.protos import third_party
+from python.protos.adapter_pb2 import AdapterConfig
+
+_ = third_party
+
+defs = dict(
+    version_file='./VERSION',
+    config=os.environ.get('CONFIG', './openolt.yml'),
+    container_name_regex=os.environ.get('CONTAINER_NUMBER_EXTRACTOR',
+                                        '^.*\.([0-9]+)\..*$'),
+    consul=os.environ.get('CONSUL', 'localhost:8500'),
+    name=os.environ.get('NAME', 'openolt'),
+    vendor=os.environ.get('VENDOR', 'Voltha Project'),
+    device_type=os.environ.get('DEVICE_TYPE', 'openoltolt'),
+    accept_bulk_flow=os.environ.get('ACCEPT_BULK_FLOW', True),
+    accept_atomic_flow=os.environ.get('ACCEPT_ATOMIC_FLOW', True),
+    etcd=os.environ.get('ETCD', 'localhost:2379'),
+    core_topic=os.environ.get('CORE_TOPIC', 'rwcore'),
+    interface=os.environ.get('INTERFACE', get_my_primary_interface()),
+    instance_id=os.environ.get('INSTANCE_ID', os.environ.get('HOSTNAME', '1')),
+    kafka_adapter=os.environ.get('KAFKA_ADAPTER', '192.168.0.20:9092'),
+    kafka_cluster=os.environ.get('KAFKA_CLUSTER', '10.100.198.220:9092'),
+    backend=os.environ.get('BACKEND', 'none'),
+    retry_interval=os.environ.get('RETRY_INTERVAL', 2),
+    heartbeat_topic=os.environ.get('HEARTBEAT_TOPIC', "adapters.heartbeat"),
+)
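+
+# Each default above can be overridden by the environment variable named in
+# its os.environ.get() call, e.g. (hypothetical values):
+#   CONFIG=/etc/voltha/openolt.yml KAFKA_ADAPTER=kafka:9092 python main.py
+# Command-line flags parsed below take precedence over these defaults.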
+
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+
+    _help = ('Path to openolt.yml config file (default: %s). '
+             'If relative, it is resolved against the directory containing '
+             'main.py of the openolt adapter.'
+             % defs['config'])
+    parser.add_argument('-c', '--config',
+                        dest='config',
+                        action='store',
+                        default=defs['config'],
+                        help=_help)
+
+    _help = 'Regular expression for extracting container number from ' \
+            'container name (default: %s)' % defs['container_name_regex']
+    parser.add_argument('-X', '--container-number-extractor',
+                        dest='container_name_regex',
+                        action='store',
+                        default=defs['container_name_regex'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to consul agent (default: %s)' % defs['consul']
+    parser.add_argument('-C', '--consul',
+                        dest='consul',
+                        action='store',
+                        default=defs['consul'],
+                        help=_help)
+
+    _help = 'name of this adapter (default: %s)' % defs['name']
+    parser.add_argument('-na', '--name',
+                        dest='name',
+                        action='store',
+                        default=defs['name'],
+                        help=_help)
+
+    _help = 'vendor of this adapter (default: %s)' % defs['vendor']
+    parser.add_argument('-ven', '--vendor',
+                        dest='vendor',
+                        action='store',
+                        default=defs['vendor'],
+                        help=_help)
+
+    _help = 'supported device type of this adapter (default: %s)' % defs[
+        'device_type']
+    parser.add_argument('-dt', '--device_type',
+                        dest='device_type',
+                        action='store',
+                        default=defs['device_type'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts bulk flow updates ' \
+            '(default: %s)' % defs['accept_bulk_flow']
+    parser.add_argument('-abf', '--accept_bulk_flow',
+                        dest='accept_bulk_flow',
+                        action='store',
+                        default=defs['accept_bulk_flow'],
+                        help=_help)
+
+    _help = 'specifies whether the device type accepts add/remove flow ' \
+            'updates (default: %s)' % defs['accept_atomic_flow']
+    parser.add_argument('-aaf', '--accept_atomic_flow',
+                        dest='accept_atomic_flow',
+                        action='store',
+                        default=defs['accept_atomic_flow'],
+                        help=_help)
+
+    _help = '<hostname>:<port> to etcd server (default: %s)' % defs['etcd']
+    parser.add_argument('-e', '--etcd',
+                        dest='etcd',
+                        action='store',
+                        default=defs['etcd'],
+                        help=_help)
+
+    _help = ('unique string id of this container instance (default: %s)'
+             % defs['instance_id'])
+    parser.add_argument('-i', '--instance-id',
+                        dest='instance_id',
+                        action='store',
+                        default=defs['instance_id'],
+                        help=_help)
+
+    _help = 'ETH interface to receive on (default: %s)' % defs['interface']
+    parser.add_argument('-I', '--interface',
+                        dest='interface',
+                        action='store',
+                        default=defs['interface'],
+                        help=_help)
+
+    _help = 'omit startup banner log lines'
+    parser.add_argument('-n', '--no-banner',
+                        dest='no_banner',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = 'do not emit periodic heartbeat log messages'
+    parser.add_argument('-N', '--no-heartbeat',
+                        dest='no_heartbeat',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = "suppress debug and info logs"
+    parser.add_argument('-q', '--quiet',
+                        dest='quiet',
+                        action='count',
+                        help=_help)
+
+    _help = 'enable verbose logging'
+    parser.add_argument('-v', '--verbose',
+                        dest='verbose',
+                        action='count',
+                        help=_help)
+
+    _help = ('use docker container name as container instance id'
+             ' (overrides -i/--instance-id option)')
+    parser.add_argument('--instance-id-is-container-name',
+                        dest='instance_id_is_container_name',
+                        action='store_true',
+                        default=False,
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka adapter broker (default: %s). '
+             'If not specified, the address from the config file is used'
+             % defs['kafka_adapter'])
+    parser.add_argument('-KA', '--kafka_adapter',
+                        dest='kafka_adapter',
+                        action='store',
+                        default=defs['kafka_adapter'],
+                        help=_help)
+
+    _help = ('<hostname>:<port> of the kafka cluster broker (default: %s). '
+             'If not specified, the address from the config file is used'
+             % defs['kafka_cluster'])
+    parser.add_argument('-KC', '--kafka_cluster',
+                        dest='kafka_cluster',
+                        action='store',
+                        default=defs['kafka_cluster'],
+                        help=_help)
+
+    _help = 'backend to use for config persistence'
+    parser.add_argument('-b', '--backend',
+                        default=defs['backend'],
+                        choices=['none', 'consul', 'etcd'],
+                        help=_help)
+
+    _help = 'topic of core on the kafka bus'
+    parser.add_argument('-ct', '--core_topic',
+                        dest='core_topic',
+                        action='store',
+                        default=defs['core_topic'],
+                        help=_help)
+
+    args = parser.parse_args()
+
+    # post-processing
+
+    if args.instance_id_is_container_name:
+        args.instance_id = get_my_containers_name()
+
+    return args
+
+
+def load_config(args):
+    path = args.config
+    if path.startswith('.'):
+        dir = os.path.dirname(os.path.abspath(__file__))
+        path = os.path.join(dir, path)
+    path = os.path.abspath(path)
+    with open(path) as fd:
+        config = yaml.safe_load(fd)
+    return config
+
+
+def print_banner(log):
+    log.info(' ____ _____  ___ _    ___   ___  _   _____   ')
+    log.info('/  _ \|  _ \| __| |  /   | / _ \| | |_   _|  ')
+    log.info('| | | | | )_| | | | / /| |  | | | | |   | |  ')
+    log.info('| |_| | __/ |_|_| |/ / | |  |_| | |___| |    ')
+    log.info('\____/|_|   |___|_|    |_| \___/|_____|_|    ')
+    log.info('                                                      ')
+    log.info('   _       _             _                            ')
+    log.info('  / \   __| | __ _ _ __ | |_ ___ _ __                 ')
+    log.info('  / _ \ / _` |/ _` | \'_ \| __/ _ \ \'__|             ')
+    log.info(' / ___ \ (_| | (_| | |_) | ||  __/ |                  ')
+    log.info('/_/   \_\__,_|\__,_| .__/ \__\___|_|                  ')
+    log.info('                   |_|                                ')
+    log.info('(to stop: press Ctrl-C)')
+
+@implementer(IComponent)
+class Main(object):
+
+    def __init__(self):
+
+        self.args = args = parse_args()
+        self.config = load_config(args)
+
+        verbosity_adjust = (args.verbose or 0) - (args.quiet or 0)
+        self.log = setup_logging(self.config.get('logging', {}),
+                                 args.instance_id,
+                                 verbosity_adjust=verbosity_adjust)
+        self.log.info('container-number-extractor',
+                      regex=args.container_name_regex)
+
+        self.openolt_adapter_version = self.get_version()
+        self.log.info('Open-OLT-Adapter-Version',
+                      version=self.openolt_adapter_version)
+
+        if not args.no_banner:
+            print_banner(self.log)
+
+        self.adapter = None
+        # Create a unique instance id using the passed-in instance id and
+        # UTC timestamp
+        current_time = arrow.utcnow().timestamp
+        self.instance_id = self.args.instance_id + '_' + str(current_time)
+
+        self.core_topic = args.core_topic
+        self.listening_topic = args.name
+        self.startup_components()
+
+        if not args.no_heartbeat:
+            self.start_heartbeat()
+            self.start_kafka_cluster_heartbeat(self.instance_id)
+
+    def get_version(self):
+        path = defs['version_file']
+        if not path.startswith('/'):
+            dir = os.path.dirname(os.path.abspath(__file__))
+            path = os.path.join(dir, path)
+
+        path = os.path.abspath(path)
+        version_file = open(path, 'r')
+        v = version_file.read()
+
+        # Use Version to validate the version string - exception will be raised
+        # if the version is invalid
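+        # e.g. './VERSION' holds '0.0.1-dev', which PEP 440 parses (as 0.0.1.dev0)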
+        Version(v)
+
+        version_file.close()
+        return v
+
+    def start(self):
+        self.start_reactor()  # will not return except on keyboard interrupt
+
+    def stop(self):
+        pass
+
+    def get_args(self):
+        """Allow access to command line args"""
+        return self.args
+
+    def get_config(self):
+        """Allow access to content of config file"""
+        return self.config
+
+    def _get_adapter_config(self):
+        cfg = AdapterConfig()
+        return cfg
+
+    @inlineCallbacks
+    def startup_components(self):
+        try:
+            self.log.info('starting-internal-components',
+                          consul=self.args.consul,
+                          etcd=self.args.etcd)
+
+            registry.register('main', self)
+
+            # Update the logger to output the vcore id.
+            self.log = update_logging(instance_id=self.instance_id,
+                                      vcore_id=None)
+
+            yield registry.register(
+                'kafka_cluster_proxy',
+                KafkaProxy(
+                    self.args.consul,
+                    self.args.kafka_cluster,
+                    config=self.config.get('kafka-cluster-proxy', {})
+                )
+            ).start()
+
+            config = self._get_adapter_config()
+
+            self.core_proxy = CoreProxy(
+                kafka_proxy=None,
+                core_topic=self.core_topic,
+                my_listening_topic=self.listening_topic)
+
+            self.adapter_proxy = AdapterProxy(
+                kafka_proxy=None,
+                core_topic=self.core_topic,
+                my_listening_topic=self.listening_topic)
+
+            self.adapter = OpenoltAdapter(core_proxy=self.core_proxy,
+                                            adapter_proxy=self.adapter_proxy,
+                                            config=config)
+
+            openolt_request_handler = AdapterRequestFacade(adapter=self.adapter)
+
+            yield registry.register(
+                'kafka_adapter_proxy',
+                IKafkaMessagingProxy(
+                    kafka_host_port=self.args.kafka_adapter,
+                    # TODO: Add KV Store object reference
+                    kv_store=self.args.backend,
+                    default_topic=self.args.name,
+                    group_id_prefix=self.args.instance_id,
+                    target_cls=openolt_request_handler
+                )
+            ).start()
+
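+            # Hand the messaging proxy registered above to both the core
+            # and adapter proxies.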
+            self.core_proxy.kafka_proxy = get_messaging_proxy()
+            self.adapter_proxy.kafka_proxy = get_messaging_proxy()
+
+            # retry forever
+            res = yield self._register_with_core(-1)
+
+            self.log.info('started-internal-services')
+
+        except Exception as e:
+            self.log.exception('Failure-to-start-all-components', e=e)
+
+    @inlineCallbacks
+    def shutdown_components(self):
+        """Execute before the reactor is shut down"""
+        self.log.info('exiting-on-keyboard-interrupt')
+        for component in reversed(registry.iterate()):
+            yield component.stop()
+
+        import threading
+        self.log.info('THREADS:')
+        main_thread = threading.current_thread()
+        for t in threading.enumerate():
+            if t is main_thread:
+                continue
+            if not t.isDaemon():
+                continue
+            self.log.info('joining thread {} {}'.format(
+                t.getName(), "daemon" if t.isDaemon() else "not-daemon"))
+            t.join()
+
+    def start_reactor(self):
+        from twisted.internet import reactor
+        reactor.callWhenRunning(
+            lambda: self.log.info('twisted-reactor-started'))
+        reactor.addSystemEventTrigger('before', 'shutdown',
+                                      self.shutdown_components)
+        reactor.run()
+
+    @inlineCallbacks
+    def _register_with_core(self, retries):
+        while 1:
+            try:
+                resp = yield self.core_proxy.register(
+                    self.adapter.adapter_descriptor(),
+                    self.adapter.device_types())
+                if resp:
+                    self.log.info('registered-with-core',
+                                  coreId=resp.instance_id)
+                returnValue(resp)
+            except TimeOutError as e:
+                self.log.warn("timeout-when-registering-with-core", e=e)
+                if retries == 0:
+                    self.log.exception("no-more-retries", e=e)
+                    raise
+                else:
+                    retries = retries if retries < 0 else retries - 1
+                    yield asleep(defs['retry_interval'])
+            except Exception as e:
+                self.log.exception("failed-registration", e=e)
+                raise
+
+    def start_heartbeat(self):
+
+        t0 = time.time()
+        t0s = time.ctime(t0)
+
+        def heartbeat():
+            self.log.debug(status='up', since=t0s, uptime=time.time() - t0)
+
+        lc = LoopingCall(heartbeat)
+        lc.start(10)
+
+    # Temporary function to send a heartbeat message to the external kafka
+    # broker
+    def start_kafka_cluster_heartbeat(self, instance_id):
+        # For the heartbeat we send a JSON-serialized dict to the heartbeat
+        # topic (default: "adapters.heartbeat").
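+        # Example message (hypothetical values):
+        #   {"type": "heartbeat", "adapter": "openolt",
+        #    "instance": "openolt_1539012345", "ip": "10.0.2.15",
+        #    "ts": 1539012400, "uptime": 50.0}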
+        message = dict(
+            type='heartbeat',
+            adapter=self.args.name,
+            instance=instance_id,
+            ip=get_my_primary_local_ipv4()
+        )
+        topic = defs['heartbeat_topic']
+
+        def send_msg(start_time):
+            try:
+                kafka_cluster_proxy = get_kafka_proxy()
+                if kafka_cluster_proxy and not kafka_cluster_proxy.is_faulty():
+                    # self.log.debug('kafka-proxy-available')
+                    message['ts'] = arrow.utcnow().timestamp
+                    message['uptime'] = time.time() - start_time
+                    # self.log.debug('start-kafka-heartbeat')
+                    kafka_cluster_proxy.send_message(topic, dumps(message))
+                else:
+                    self.log.error('kafka-proxy-unavailable')
+            except Exception as e:
+                self.log.exception('failed-sending-message-heartbeat', e=e)
+
+        try:
+            t0 = time.time()
+            lc = LoopingCall(send_msg, t0)
+            lc.start(10)
+        except Exception as e:
+            self.log.exception('failed-kafka-heartbeat', e=e)
+
+
+if __name__ == '__main__':
+    Main().start()
diff --git a/python/adapters/openolt/openolt.py b/python/adapters/openolt/openolt.py
new file mode 100644
index 0000000..2c87730
--- /dev/null
+++ b/python/adapters/openolt/openolt.py
@@ -0,0 +1,897 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Openolt adapter.
+"""
+import arrow
+import grpc
+import structlog
+from google.protobuf.empty_pb2 import Empty
+from google.protobuf.json_format import MessageToDict
+from scapy.layers.inet import Raw
+import json
+from google.protobuf.message import Message
+from grpc._channel import _Rendezvous
+from scapy.layers.l2 import Ether, Dot1Q
+from simplejson import dumps
+from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue
+from twisted.internet.task import LoopingCall
+
+from python.adapters.common.frameio.frameio import BpfProgramFilter, hexify
+from python.adapters.iadapter import OltAdapter
+from python.common.utils.asleep import asleep
+from python.common.utils.registry import registry
+from python.adapters.kafka.kafka_proxy import get_kafka_proxy
+from python.protos import openolt_pb2
+from python.protos import third_party
+from python.protos.common_pb2 import OperStatus, ConnectStatus
+from python.protos.common_pb2 import LogLevel
+from python.protos.common_pb2 import OperationResp
+from python.protos.inter_container_pb2 import SwitchCapability, PortCapability, \
+    InterAdapterMessageType, InterAdapterResponseBody
+from python.protos.device_pb2 import Port, PmConfig, PmConfigs, \
+    DeviceType, DeviceTypes
+from python.protos.adapter_pb2 import Adapter
+from python.protos.adapter_pb2 import AdapterConfig
+
+
+from python.protos.events_pb2 import KpiEvent, KpiEventType, MetricValuePairs
+from python.protos.logical_device_pb2 import LogicalPort
+from python.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+    OFPPF_1GB_FD, \
+    OFPC_GROUP_STATS, OFPC_PORT_STATS, OFPC_TABLE_STATS, OFPC_FLOW_STATS, \
+    ofp_switch_features, ofp_desc
+from python.protos.openflow_13_pb2 import ofp_port
+from python.protos.ponsim_pb2 import FlowTable, PonSimFrame, PonSimMetricsRequest, PonSimStub
+
+_ = third_party
+log = structlog.get_logger()
+#OpenOltDefaults = {
+#    'support_classes': {
+#        'platform': OpenOltPlatform,
+#        'resource_mgr': OpenOltResourceMgr,
+#        'flow_mgr': OpenOltFlowMgr,
+#        'alarm_mgr': OpenOltAlarmMgr,
+#        'stats_mgr': OpenOltStatisticsMgr,
+#        'bw_mgr': OpenOltBW
+#    }
+#}
+
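+# NOTE: mac_str_to_tuple is used by get_ofp_port_info() below but is not
+# imported in this snapshot; the small helper here is a stand-in for the
+# voltha utility (an assumption, not the original import):
+def mac_str_to_tuple(mac):
+    # 'AA:BB:CC:DD:EE:FF' -> (0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF)
+    return tuple(int(octet, 16) for octet in mac.split(':'))
+
+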
+class AdapterPmMetrics:
+    def __init__(self, device):
+        self.pm_names = {'tx_64_pkts', 'tx_65_127_pkts', 'tx_128_255_pkts',
+                         'tx_256_511_pkts', 'tx_512_1023_pkts',
+                         'tx_1024_1518_pkts', 'tx_1519_9k_pkts',
+                         'rx_64_pkts', 'rx_65_127_pkts',
+                         'rx_128_255_pkts', 'rx_256_511_pkts',
+                         'rx_512_1023_pkts', 'rx_1024_1518_pkts',
+                         'rx_1519_9k_pkts'}
+        self.device = device
+        self.id = device.id
+        self.name = 'ponsim_olt'
+        self.default_freq = 150
+        self.grouped = False
+        self.freq_override = False
+        self.pon_metrics_config = dict()
+        self.nni_metrics_config = dict()
+        self.lc = None
+        for m in self.pm_names:
+            self.pon_metrics_config[m] = PmConfig(name=m,
+                                                  type=PmConfig.COUNTER,
+                                                  enabled=True)
+            self.nni_metrics_config[m] = PmConfig(name=m,
+                                                  type=PmConfig.COUNTER,
+                                                  enabled=True)
+
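+    # default_freq is expressed in tenths of a second: the collector runs
+    # every default_freq / 10 seconds (the default of 150 -> every 15s).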
+    def update(self, pm_config):
+        if self.default_freq != pm_config.default_freq:
+            # Update the callback to the new frequency.
+            self.default_freq = pm_config.default_freq
+            self.lc.stop()
+            self.lc.start(interval=self.default_freq / 10)
+        for m in pm_config.metrics:
+            self.pon_metrics_config[m.name].enabled = m.enabled
+            self.nni_metrics_config[m.name].enabled = m.enabled
+
+    def make_proto(self):
+        pm_config = PmConfigs(
+            id=self.id,
+            default_freq=self.default_freq,
+            grouped=False,
+            freq_override=False)
+        for m in sorted(self.pon_metrics_config):
+            pm = self.pon_metrics_config[m]  # Either will do; they're the same
+            pm_config.metrics.extend([PmConfig(name=pm.name,
+                                               type=pm.type,
+                                               enabled=pm.enabled)])
+        return pm_config
+
+    def collect_port_metrics(self, channel):
+        rtrn_port_metrics = dict()
+        stub = PonSimStub(channel)
+        stats = stub.GetStats(Empty())
+        rtrn_port_metrics['pon'] = self.extract_pon_metrics(stats)
+        rtrn_port_metrics['nni'] = self.extract_nni_metrics(stats)
+        return rtrn_port_metrics
+
+    def extract_pon_metrics(self, stats):
+        rtrn_pon_metrics = dict()
+        for m in stats.metrics:
+            if m.port_name == "pon":
+                for p in m.packets:
+                    if self.pon_metrics_config[p.name].enabled:
+                        rtrn_pon_metrics[p.name] = p.value
+                return rtrn_pon_metrics
+
+    def extract_nni_metrics(self, stats):
+        rtrn_nni_metrics = dict()
+        for m in stats.metrics:
+            if m.port_name == "nni":
+                for p in m.packets:
+                    if self.nni_metrics_config[p.name].enabled:
+                        rtrn_nni_metrics[p.name] = p.value
+                return rtrn_nni_metrics
+
+    def start_collector(self, callback):
+        log.info("starting-pm-collection", device_name=self.name,
+                 device_id=self.device.id)
+        prefix = 'voltha.{}.{}'.format(self.name, self.device.id)
+        self.lc = LoopingCall(callback, self.device.id, prefix)
+        self.lc.start(interval=self.default_freq / 10)
+
+    def stop_collector(self):
+        log.info("stopping-pm-collection", device_name=self.name,
+                 device_id=self.device.id)
+        self.lc.stop()
+
+
+class AdapterAlarms:
+    def __init__(self, adapter, device):
+        self.adapter = adapter
+        self.device = device
+        self.lc = None
+
+    # TODO: Implement code to send to kafka cluster directly instead of
+    # going through the voltha core.
+    def send_alarm(self, context_data, alarm_data):
+        log.debug("send-alarm-not-implemented")
+        return
+
+
+
+class OpenoltAdapter(OltAdapter):
+    name = 'openolt'
+
+    supported_device_types = [
+        DeviceType(
+            id=name,
+            adapter=name,
+            accepts_bulk_flow_update=True,
+            accepts_add_remove_flow_updates=True
+        )
+    ]
+
+    # System Init Methods #
+    def __init__(self, core_proxy, adapter_proxy, config):
+        super(OpenoltAdapter, self).__init__(core_proxy=core_proxy,
+                                             adapter_proxy=adapter_proxy,
+                                             config=config,
+                                             device_handler_class=OpenoltHandler,
+                                             name='openolt',
+                                             vendor='Voltha project',
+                                             version='0.4',
+                                             device_type='openolt',
+                                             accepts_bulk_flow_update=True,
+                                             accepts_add_remove_flow_updates=False)
+        self.adapter_proxy = adapter_proxy
+        self.core_proxy = core_proxy
+        self.config = config
+        self.descriptor = Adapter(
+            id=self.name,
+            vendor='OLT white box vendor',
+            version='0.1',
+            config=config
+        )
+        log.debug('openolt.__init__', adapter_proxy=adapter_proxy)
+        self.devices = dict()  # device_id -> OpenoltDevice()
+        self.interface = registry('main').get_args().interface
+        self.logical_device_id_to_root_device_id = dict()
+        self.num_devices = 0
+
+    def start(self):
+        log.info('started', interface=self.interface)
+
+    def stop(self):
+        log.info('stopped', interface=self.interface)
+
+
+    # Info Methods #
+    def adapter_descriptor(self):
+        log.debug('get descriptor', interface=self.interface)
+        return self.descriptor
+
+    def device_types(self):
+        log.debug('get device_types', interface=self.interface,
+                  items=self.supported_device_types)
+        return DeviceTypes(items=self.supported_device_types)
+
+    def health(self):
+        log.debug('get health', interface=self.interface)
+        raise NotImplementedError()
+
+    def get_device_details(self, device):
+        log.debug('get_device_details', device=device)
+        raise NotImplementedError()
+
+
+    # Device Operation Methods #
+    def change_master_state(self, master):
+        log.debug('change_master_state', interface=self.interface,
+                  master=master)
+        raise NotImplementedError()
+
+    def abandon_device(self, device):
+        log.info('abandon-device', device=device)
+        raise NotImplementedError()
+
+
+    # Configuration Methods #
+    def update_flows_incrementally(self, device, flow_changes, group_changes):
+        log.debug('update_flows_incrementally', device=device,
+                  flow_changes=flow_changes, group_changes=group_changes)
+        log.info('This device does not support incremental flow updates; '
+                 'not implemented')
+        raise NotImplementedError()
+
+    def update_pm_config(self, device, pm_configs):
+        log.info('update_pm_config - Not implemented yet', device=device,
+                 pm_configs=pm_configs)
+        raise NotImplementedError()
+
+    def receive_proxied_message(self, proxy_address, msg):
+        log.debug('receive_proxied_message - Not implemented',
+                  proxy_address=proxy_address,
+                  proxied_msg=msg)
+        raise NotImplementedError()
+
+    def receive_inter_adapter_message(self, msg):
+        log.info('rx_inter_adapter_msg - Not implemented')
+        raise NotImplementedError()
+
+
+    # Image Operations Methods #
+    def download_image(self, device, request):
+        log.info('image_download - Not implemented yet', device=device,
+                 request=request)
+        raise NotImplementedError()
+
+    def get_image_download_status(self, device, request):
+        log.info('get_image_download - Not implemented yet', device=device,
+                 request=request)
+        raise NotImplementedError()
+
+    def cancel_image_download(self, device, request):
+        log.info('cancel_image_download - Not implemented yet', device=device)
+        raise NotImplementedError()
+
+    def activate_image_update(self, device, request):
+        log.info('activate_image_update - Not implemented yet',
+                 device=device, request=request)
+        raise NotImplementedError()
+
+    def revert_image_update(self, device, request):
+        log.info('revert_image_update - Not implemented yet',
+                 device=device, request=request)
+        raise NotImplementedError()
+
+    def self_test_device(self, device):
+        # from voltha.protos.voltha_pb2 import SelfTestResponse
+        log.info('Not implemented yet')
+        raise NotImplementedError()
+
+
+    # PON Operations Methods #
+    def create_interface(self, device, data):
+        log.debug('create-interface - Not implemented - We do not use this',
+                  data=data)
+        raise NotImplementedError()
+
+    def update_interface(self, device, data):
+        log.debug('update-interface - Not implemented - We do not use this',
+                  data=data)
+        raise NotImplementedError()
+
+    def remove_interface(self, device, data):
+        log.debug('remove-interface - Not implemented - We do not use this',
+                  data=data)
+        raise NotImplementedError()
+
+    def receive_onu_detect_state(self, proxy_address, state):
+        log.debug('receive-onu-detect-state - Not implemented - We do not '
+                  'use this', proxy_address=proxy_address,
+                  state=state)
+        raise NotImplementedError()
+
+    def create_tcont(self, device, tcont_data, traffic_descriptor_data):
+        log.info('create-tcont - Not implemented - We do not use this',
+                 tcont_data=tcont_data,
+                 traffic_descriptor_data=traffic_descriptor_data)
+        raise NotImplementedError()
+
+    def update_tcont(self, device, tcont_data, traffic_descriptor_data):
+        log.info('update-tcont - Not implemented - We do not use this',
+                 tcont_data=tcont_data,
+                 traffic_descriptor_data=traffic_descriptor_data)
+        raise NotImplementedError()
+
+    def remove_tcont(self, device, tcont_data, traffic_descriptor_data):
+        log.info('remove-tcont - Not implemented - We do not use this',
+                 tcont_data=tcont_data,
+                 traffic_descriptor_data=traffic_descriptor_data)
+        raise NotImplementedError()
+
+    def create_gemport(self, device, data):
+        log.info('create-gemport - Not implemented - We do not use this',
+                 data=data)
+        raise NotImplementedError()
+
+    def update_gemport(self, device, data):
+        log.info('update-gemport - Not implemented - We do not use this',
+                 data=data)
+        raise NotImplementedError()
+
+    def remove_gemport(self, device, data):
+        log.info('remove-gemport - Not implemented - We do not use this',
+                 data=data)
+        raise NotImplementedError()
+
+    def create_multicast_gemport(self, device, data):
+        log.info('create-mcast-gemport  - Not implemented - We do not use '
+                 'this', data=data)
+        raise NotImplementedError()
+
+    def update_multicast_gemport(self, device, data):
+        log.info('update-mcast-gemport - Not implemented - We do not use '
+                 'this', data=data)
+        raise NotImplementedError()
+
+    def remove_multicast_gemport(self, device, data):
+        log.info('remove-mcast-gemport - Not implemented - We do not use '
+                 'this', data=data)
+        raise NotImplementedError()
+
+    def create_multicast_distribution_set(self, device, data):
+        log.info('create-mcast-distribution-set - Not implemented - We do '
+                 'not use this', data=data)
+        raise NotImplementedError()
+
+    def update_multicast_distribution_set(self, device, data):
+        log.info('update-mcast-distribution-set - Not implemented - We do '
+                 'not use this', data=data)
+        raise NotImplementedError()
+
+    def remove_multicast_distribution_set(self, device, data):
+        log.info('remove-mcast-distribution-set - Not implemented - We do '
+                 'not use this', data=data)
+        raise NotImplementedError()
+
+
+    # Alarm Methods #
+    def suppress_alarm(self, filter):
+        log.info('suppress_alarm - Not implemented yet', filter=filter)
+        raise NotImplementedError()
+
+    def unsuppress_alarm(self, filter):
+        log.info('unsuppress_alarm - Not implemented yet', filter=filter)
+        raise NotImplementedError()
+
+class OpenoltHandler(object):
+    def __init__(self, adapter, device_id):
+        self.adapter = adapter
+        self.core_proxy = adapter.core_proxy
+        self.adapter_proxy = adapter.adapter_proxy
+        self.device_id = device_id
+        self.log = structlog.get_logger(device_id=device_id)
+        self.channel = None
+        self.io_port = None
+        self.logical_device_id = None
+        self.nni_port = None
+        self.ofp_port_no = None
+        self.interface = registry('main').get_args().interface
+        self.pm_metrics = None
+        self.alarms = None
+        self.frames = None
+
+    @inlineCallbacks
+    def get_channel(self):
+        if self.channel is None:
+            try:
+                device = yield self.core_proxy.get_device(self.device_id)
+                self.log.info('device-info', device=device,
+                              host_port=device.host_and_port)
+                self.channel = grpc.insecure_channel(device.host_and_port)
+            except Exception as e:
+                log.exception("ponsim-connection-failure", e=e)
+
+        # returnValue(self.channel)
+
+    def close_channel(self):
+        if self.channel is None:
+            self.log.info('grpc-channel-already-closed')
+            return
+        else:
+            if self.frames is not None:
+                self.frames.cancel()
+                self.frames = None
+                self.log.info('cancelled-grpc-frame-stream')
+
+            self.channel.unsubscribe(lambda *args: None)
+            self.channel = None
+
+            self.log.info('grpc-channel-closed')
+
+    @inlineCallbacks
+    def _get_nni_port(self):
+        ports = yield self.core_proxy.get_ports(self.device_id,
+                                                Port.ETHERNET_NNI)
+        returnValue(ports)
+
+    @inlineCallbacks
+    def activate(self, device):
+        try:
+            self.log.info('activating')
+            if not device.host_and_port:
+                device.oper_status = OperStatus.FAILED
+                device.reason = 'No host_and_port field provided'
+                self.core_proxy.device_update(device)
+                return
+            # Disabled until the real OpenOLT device handler is wired in:
+            # kwargs = {
+            #     'support_classes': OpenOltDefaults['support_classes'],
+            #     'adapter_agent': self.adapter_proxy,
+            #     'device': device,
+            #     'device_num': self.num_devices + 1
+            # }
+            # try:
+            #     self.devices[device.id] = OpenoltDevice(**kwargs)
+            # except Exception as e:
+            #     log.error('Failed to adopt OpenOLT device', error=e)
+            #     # TODO set status to ERROR so that is clear something went wrong
+            #     del self.devices[device.id]
+            #     raise
+            # else:
+            #     self.num_devices += 1
+            yield self.get_channel()
+            stub = PonSimStub(self.channel)
+            info = stub.GetDeviceInfo(Empty())
+            log.info('got-info', info=info, device_id=device.id)
+            self.ofp_port_no = info.nni_port
+
+            device.root = True
+            device.vendor = 'ponsim'
+            device.model = 'n/a'
+            device.serial_number = device.host_and_port
+            device.mac_address = "AA:BB:CC:DD:EE:FF"
+            yield self.core_proxy.device_update(device)
+
+            # Now set the initial PM configuration for this device
+            self.pm_metrics = AdapterPmMetrics(device)
+            pm_config = self.pm_metrics.make_proto()
+            log.info("initial-pm-config", pm_config=pm_config)
+            self.core_proxy.device_pm_config_update(pm_config, init=True)
+
+            # Setup alarm handler
+            self.alarms = AdapterAlarms(self.adapter, device)
+
+            nni_port = Port(
+                port_no=info.nni_port,
+                label='NNI facing Ethernet port',
+                type=Port.ETHERNET_NNI,
+                oper_status=OperStatus.ACTIVE
+            )
+            self.nni_port = nni_port
+            yield self.core_proxy.port_created(device.id, nni_port)
+            yield self.core_proxy.port_created(device.id, Port(
+                port_no=1,
+                label='PON port',
+                type=Port.PON_OLT,
+                oper_status=OperStatus.ACTIVE
+            ))
+
+            yield self.core_proxy.device_state_update(device.id,
+                                                      connect_status=ConnectStatus.REACHABLE,
+                                                      oper_status=OperStatus.ACTIVE)
+
+            # register ONUS
+            self.log.info('onu-found', onus=info.onus, len=len(info.onus))
+            for onu in info.onus:
+                vlan_id = onu.uni_port
+                yield self.core_proxy.child_device_detected(
+                    parent_device_id=device.id,
+                    parent_port_no=1,
+                    child_device_type='ponsim_onu',
+                    channel_id=vlan_id,
+                )
+
+            self.log.info('starting-frame-grpc-stream')
+            reactor.callInThread(self.rcv_grpc)
+            self.log.info('started-frame-grpc-stream')
+
+            # Start collecting stats from the device after a brief pause
+            self.start_kpi_collection(device.id)
+        except Exception as e:
+            log.exception("Exception-activating", e=e)
+
+    def get_ofp_device_info(self, device):
+        return SwitchCapability(
+            desc=ofp_desc(
+                hw_desc='ponsim pon',
+                sw_desc='ponsim pon',
+                serial_num=device.serial_number,
+                dp_desc='n/a'
+            ),
+            switch_features=ofp_switch_features(
+                n_buffers=256,  # TODO fake for now
+                n_tables=2,  # TODO ditto
+                capabilities=(  # TODO and ditto
+                        OFPC_FLOW_STATS
+                        | OFPC_TABLE_STATS
+                        | OFPC_PORT_STATS
+                        | OFPC_GROUP_STATS
+                )
+            )
+        )
+
+    def get_ofp_port_info(self, device, port_no):
+        # Since the adapter created the device port then it has the reference of the port to
+        # return the capability.   TODO:  Do a lookup on the NNI port number and return the
+        # appropriate attributes
+        self.log.info('get_ofp_port_info', port_no=port_no,
+                      info=self.ofp_port_no, device_id=device.id)
+        cap = OFPPF_1GB_FD | OFPPF_FIBER
+        return PortCapability(
+            port=LogicalPort(
+                ofp_port=ofp_port(
+                    hw_addr=mac_str_to_tuple(
+                        'AA:BB:CC:DD:EE:%02x' % port_no),
+                    config=0,
+                    state=OFPPS_LIVE,
+                    curr=cap,
+                    advertised=cap,
+                    peer=cap,
+                    curr_speed=OFPPF_1GB_FD,
+                    max_speed=OFPPF_1GB_FD
+                ),
+                device_id=device.id,
+                device_port_no=port_no
+            )
+        )
+
+    # TODO - change for core 2.0
+    def reconcile(self, device):
+        self.log.info('reconciling-OLT-device')
+
+    @inlineCallbacks
+    def _rcv_frame(self, frame):
+        pkt = Ether(frame)
+
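+        # Frames from the device arrive double-tagged; when an inner C-tag is
+        # present, pop both tags and use the C-VID as the packet-in port.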
+        if pkt.haslayer(Dot1Q):
+            outer_shim = pkt.getlayer(Dot1Q)
+
+            if isinstance(outer_shim.payload, Dot1Q):
+                inner_shim = outer_shim.payload
+                cvid = inner_shim.vlan
+                popped_frame = (
+                        Ether(src=pkt.src, dst=pkt.dst, type=inner_shim.type) /
+                        inner_shim.payload
+                )
+                self.log.info('sending-packet-in',
+                              device_id=self.device_id, port=cvid)
+                yield self.core_proxy.send_packet_in(device_id=self.device_id,
+                                               port=cvid,
+                                               packet=str(popped_frame))
+            elif pkt.haslayer(Raw):
+                raw_data = json.loads(pkt.getlayer(Raw).load)
+                self.alarms.send_alarm(self, raw_data)
+
+    @inlineCallbacks
+    def rcv_grpc(self):
+        """
+        This call establishes a GRPC stream to receive frames.
+        """
+        yield self.get_channel()
+        stub = PonSimStub(self.channel)
+        # stub = PonSimStub(self.get_channel())
+
+        # Attempt to establish a grpc stream with the remote ponsim service
+        self.frames = stub.ReceiveFrames(Empty())
+
+        self.log.info('start-receiving-grpc-frames')
+
+        try:
+            for frame in self.frames:
+                self.log.info('received-grpc-frame',
+                              frame_len=len(frame.payload))
+                yield self._rcv_frame(frame.payload)
+
+        except _Rendezvous as e:
+            log.warn('grpc-connection-lost', message=e.message)
+
+        self.log.info('stopped-receiving-grpc-frames')
+
+    @inlineCallbacks
+    def update_flow_table(self, flows):
+        yield self.get_channel()
+        stub = PonSimStub(self.channel)
+
+        self.log.info('pushing-olt-flow-table')
+        stub.UpdateFlowTable(FlowTable(
+            port=0,
+            flows=flows
+        ))
+        self.log.info('success')
+
+    def remove_from_flow_table(self, flows):
+        self.log.debug('remove-from-flow-table', flows=flows)
+        # TODO: Update PONSIM code to accept incremental flow changes
+        # Once completed, the accepts_add_remove_flow_updates for this
+        # device type can be set to True
+
+    def add_to_flow_table(self, flows):
+        self.log.debug('add-to-flow-table', flows=flows)
+        # TODO: Update PONSIM code to accept incremental flow changes
+        # Once completed, the accepts_add_remove_flow_updates for this
+        # device type can be set to True
+
+    def update_pm_config(self, device, pm_config):
+        log.info("handler-update-pm-config", device=device,
+                 pm_config=pm_config)
+        self.pm_metrics.update(pm_config)
+
+    def send_proxied_message(self, proxy_address, msg):
+        self.log.info('sending-proxied-message')
+        if isinstance(msg, FlowTable):
+            stub = PonSimStub(self.get_channel())
+            self.log.info('pushing-onu-flow-table', port=msg.port)
+            res = stub.UpdateFlowTable(msg)
+            self.core_proxy.receive_proxied_message(proxy_address, res)
+
+    @inlineCallbacks
+    def process_inter_adapter_message(self, request):
+        self.log.info('process-inter-adapter-message', msg=request)
+        try:
+            if request.header.type == InterAdapterMessageType.FLOW_REQUEST:
+                f = FlowTable()
+                if request.body:
+                    request.body.Unpack(f)
+                    stub = PonSimStub(self.channel)
+                    self.log.info('pushing-onu-flow-table')
+                    res = stub.UpdateFlowTable(f)
+                    # Send response back
+                    reply = InterAdapterResponseBody()
+                    reply.status = True
+                    self.log.info('sending-response-back', reply=reply)
+                    yield self.adapter_proxy.send_inter_adapter_message(
+                        msg=reply,
+                        type=InterAdapterMessageType.FLOW_RESPONSE,
+                        from_adapter=self.adapter.name,
+                        to_adapter=request.header.from_topic,
+                        to_device_id=request.header.to_device_id,
+                        message_id=request.header.id
+                    )
+            elif request.header.type == InterAdapterMessageType.METRICS_REQUEST:
+                m = PonSimMetricsRequest()
+                if request.body:
+                    request.body.Unpack(m)
+                    stub = PonSimStub(self.channel)
+                    self.log.info('proxying onu stats request', port=m.port)
+                    res = stub.GetStats(m)
+                    # Send response back
+                    reply = InterAdapterResponseBody()
+                    reply.status = True
+                    reply.body.Pack(res)
+                    self.log.info('sending-response-back', reply=reply)
+                    yield self.adapter_proxy.send_inter_adapter_message(
+                        msg=reply,
+                        type=InterAdapterMessageType.METRICS_RESPONSE,
+                        from_adapter=self.adapter.name,
+                        to_adapter=request.header.from_topic,
+                        to_device_id=request.header.to_device_id,
+                        message_id=request.header.id
+                    )
+        except Exception as e:
+            self.log.exception("error-processing-inter-adapter-message", e=e)
+
+    def packet_out(self, egress_port, msg):
+        self.log.info('sending-packet-out', egress_port=egress_port,
+                      msg=hexify(msg))
+        try:
+            pkt = Ether(msg)
+            out_pkt = pkt
+            if egress_port != self.nni_port.port_no:
+                # don't do the vlan manipulation for the NNI port, vlans are already correct
+                out_pkt = (
+                        Ether(src=pkt.src, dst=pkt.dst) /
+                        Dot1Q(vlan=egress_port, type=pkt.type) /
+                        pkt.payload
+                )
+
+            # TODO need better way of mapping logical ports to PON ports
+            out_port = self.nni_port.port_no if egress_port == self.nni_port.port_no else 1
+
+            # send over grpc stream
+            stub = PonSimStub(self.channel)
+            frame = PonSimFrame(id=self.device_id, payload=str(out_pkt),
+                                out_port=out_port)
+            stub.SendFrame(frame)
+        except Exception as e:
+            self.log.exception("error-processing-packet-out", e=e)
+
+
+    @inlineCallbacks
+    def reboot(self):
+        self.log.info('rebooting', device_id=self.device_id)
+
+        yield self.core_proxy.device_state_update(self.device_id,
+                                                  connect_status=ConnectStatus.UNREACHABLE)
+
+        # Update the child devices connect state to UNREACHABLE
+        yield self.core_proxy.children_state_update(self.device_id,
+                                                    connect_status=ConnectStatus.UNREACHABLE)
+
+        # Sleep 10 secs, simulating a reboot
+        # TODO: send alert and clear alert after the reboot
+        yield asleep(10)
+
+        # Change the connection status back to REACHABLE.  With a
+        # real OLT the connection state must be the actual state
+        yield self.core_proxy.device_state_update(self.device_id,
+                                                  connect_status=ConnectStatus.REACHABLE)
+
+        # Update the child devices connect state to REACHABLE
+        yield self.core_proxy.children_state_update(self.device_id,
+                                                    connect_status=ConnectStatus.REACHABLE)
+
+        self.log.info('rebooted', device_id=self.device_id)
+
+    def self_test_device(self, device):
+        """
+        This is called to self-test a device based on an NBI call.
+        :param device: A Voltha.Device object.
+        :return: Will return result of self test
+        """
+        log.info('self-test-device', device=device.id)
+        raise NotImplementedError()
+
+    @inlineCallbacks
+    def disable(self):
+        self.log.info('disabling', device_id=self.device_id)
+
+        self.stop_kpi_collection()
+
+        # Update the operational status to UNKNOWN and connection status to UNREACHABLE
+        yield self.core_proxy.device_state_update(self.device_id,
+                                                  oper_status=OperStatus.UNKNOWN,
+                                                  connect_status=ConnectStatus.UNREACHABLE)
+
+        self.close_channel()
+        self.log.info('disabled-grpc-channel')
+
+        # TODO:
+        # 1) Remove all flows from the device
+        # 2) Remove the device from ponsim
+
+        self.log.info('disabled', device_id=self.device_id)
+
+    @inlineCallbacks
+    def reenable(self):
+        self.log.info('re-enabling', device_id=self.device_id)
+
+        # Set the ofp_port_no and nni_port in case we bypassed the reconcile
+        # process if the device was in DISABLED state on voltha restart
+        if not self.ofp_port_no and not self.nni_port:
+            yield self.get_channel()
+            stub = PonSimStub(self.channel)
+            info = stub.GetDeviceInfo(Empty())
+            log.info('got-info', info=info)
+            self.ofp_port_no = info.nni_port
+            ports = yield self._get_nni_port()
+            # For ponsim, we are using only 1 NNI port
+            if ports.items:
+                self.nni_port = ports.items[0]
+
+        # Update the state of the NNI port
+        yield self.core_proxy.port_state_update(self.device_id,
+                                                port_type=Port.ETHERNET_NNI,
+                                                port_no=self.ofp_port_no,
+                                                oper_status=OperStatus.ACTIVE)
+
+        # Update the state of the PON port
+        yield self.core_proxy.port_state_update(self.device_id,
+                                                port_type=Port.PON_OLT,
+                                                port_no=1,
+                                                oper_status=OperStatus.ACTIVE)
+
+        # Set the operational state of the device to ACTIVE and connect status to REACHABLE
+        yield self.core_proxy.device_state_update(self.device_id,
+                                                  connect_status=ConnectStatus.REACHABLE,
+                                                  oper_status=OperStatus.ACTIVE)
+
+        # TODO: establish frame grpc-stream
+        # yield reactor.callInThread(self.rcv_grpc)
+
+        self.start_kpi_collection(self.device_id)
+
+        self.log.info('re-enabled', device_id=self.device_id)
+
+    def delete(self):
+        self.log.info('deleting', device_id=self.device_id)
+
+        self.close_channel()
+        self.log.info('disabled-grpc-channel')
+
+        # TODO:
+        # 1) Remove all flows from the device
+        # 2) Remove the device from ponsim
+
+        self.log.info('deleted', device_id=self.device_id)
+
+    def start_kpi_collection(self, device_id):
+
+        kafka_cluster_proxy = get_kafka_proxy()
+
+        def _collect(device_id, prefix):
+
+            try:
+                # Step 1: gather metrics from device
+                port_metrics = \
+                    self.pm_metrics.collect_port_metrics(self.channel)
+
+                # Step 2: prepare the KpiEvent for submission
+                # we can time-stamp them here (or could use time derived from the OLT)
+                ts = arrow.utcnow().timestamp
+                kpi_event = KpiEvent(
+                    type=KpiEventType.slice,
+                    ts=ts,
+                    prefixes={
+                        # OLT NNI port
+                        prefix + '.nni': MetricValuePairs(
+                            metrics=port_metrics['nni']),
+                        # OLT PON port
+                        prefix + '.pon': MetricValuePairs(
+                            metrics=port_metrics['pon'])
+                    }
+                )
+
+                # Step 3: submit directly to the kafka bus
+                if kafka_cluster_proxy:
+                    if isinstance(kpi_event, Message):
+                        kpi_event = dumps(MessageToDict(kpi_event, True, True))
+                    kafka_cluster_proxy.send_message("voltha.kpis", kpi_event)
+
+            except Exception as e:
+                self.log.exception('failed-to-submit-kpis', e=e)
+
+        self.pm_metrics.start_collector(_collect)
+
+    def stop_kpi_collection(self):
+        self.pm_metrics.stop_collector()
diff --git a/python/adapters/openolt/openolt.yml b/python/adapters/openolt/openolt.yml
new file mode 100644
index 0000000..14f63bb
--- /dev/null
+++ b/python/adapters/openolt/openolt.yml
@@ -0,0 +1,67 @@
+---
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+logging:
+    version: 1
+
+    formatters:
+      brief:
+        format: '%(message)s'
+      default:
+        format: '%(asctime)s.%(msecs)03d %(levelname)-8s %(threadName)s %(module)s.%(funcName)s %(message)s'
+        datefmt: '%Y%m%dT%H%M%S'
+
+    handlers:
+        console:
+            class : logging.StreamHandler
+            level: DEBUG
+            formatter: default
+            stream: ext://sys.stdout
+        localRotatingFile:
+            class: logging.handlers.RotatingFileHandler
+            filename: openolt.log
+            formatter: default
+            maxBytes: 2097152
+            backupCount: 10
+            level: DEBUG
+        null:
+            class: logging.NullHandler
+
+    loggers:
+        amqp:
+            handlers: [null]
+            propagate: False
+        conf:
+            propagate: False
+        '': # root logger
+            handlers: [console, localRotatingFile]
+            level: DEBUG # this can be bumped up/down by -q and -v command-line options
+            propagate: False
+
+
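+# Map internal event-bus topics onto Kafka topics; a null filter is intended
+# to pass all events on the topic through unfiltered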
+kafka-cluster-proxy:
+    event_bus_publisher:
+        topic_mappings:
+            'model-change-events':
+                kafka_topic: 'voltha.events'
+                filters:     [null]
+            'alarms':
+                kafka_topic: 'voltha.alarms'
+                filters:     [null]
+            'kpis':
+                kafka_topic: 'voltha.kpis'
+                filters:     [null]
+
diff --git a/python/adapters/openolt/openolt_alarms.py b/python/adapters/openolt/openolt_alarms.py
new file mode 100644
index 0000000..764a013
--- /dev/null
+++ b/python/adapters/openolt/openolt_alarms.py
@@ -0,0 +1,466 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import arrow
+from voltha.extensions.alarms.adapter_alarms import AdapterAlarms
+from voltha.extensions.alarms.simulator.simulate_alarms import AdapterAlarmSimulator
+from voltha.extensions.alarms.olt.olt_los_alarm import OltLosAlarm
+from voltha.extensions.alarms.onu.onu_dying_gasp_alarm import OnuDyingGaspAlarm
+from voltha.extensions.alarms.onu.onu_los_alarm import OnuLosAlarm
+from voltha.extensions.alarms.onu.onu_lopc_miss_alarm import OnuLopcMissAlarm
+from voltha.extensions.alarms.onu.onu_lopc_mic_error_alarm import OnuLopcMicErrorAlarm
+from voltha.extensions.alarms.onu.onu_lob_alarm import OnuLobAlarm
+
+from voltha.extensions.alarms.onu.onu_startup_alarm import OnuStartupAlarm
+from voltha.extensions.alarms.onu.onu_signal_degrade_alarm import OnuSignalDegradeAlarm
+from voltha.extensions.alarms.onu.onu_signal_fail_alarm import OnuSignalFailAlarm
+from voltha.extensions.alarms.onu.onu_window_drift_alarm import OnuWindowDriftAlarm
+from voltha.extensions.alarms.onu.onu_activation_fail_alarm import OnuActivationFailAlarm
+
+import protos.openolt_pb2 as openolt_pb2
+import voltha.protos.device_pb2 as device_pb2
+
+
+class OpenOltAlarmMgr(object):
+    def __init__(self, log, adapter_agent, device_id, logical_device_id,
+                 platform):
+        """
+        20180711 -  Addition of adapter_agent and device_id
+            to facilitate alarm processing and kafka posting
+        :param log:
+        :param adapter_agent:
+        :param device_id:
+        """
+        self.log = log
+        self.adapter_agent = adapter_agent
+        self.device_id = device_id
+        self.logical_device_id = logical_device_id
+        self.platform = platform
+        """
+        The following is added to reduce the continual posting of OLT LOS alarming
+        to Kafka.   Set enable_alarm_suppress = true to enable  otherwise the
+        current openolt bal will send continuous olt los alarm cleared messages
+        ONU disc raised counter is place holder for a future addition
+        """
+        self.enable_alarm_suppress = True
+        self.alarm_suppress = {"olt_los_clear": 0, "onu_disc_raised": []}  # Keep count of alarms to limit.
+        try:
+            self.alarms = AdapterAlarms(self.adapter_agent, self.device_id, self.logical_device_id)
+            self.simulator = AdapterAlarmSimulator(self.alarms)
+        except Exception as initerr:
+            self.log.exception("alarmhandler-init-error", errmsg=initerr.message)
+            raise
+
+    def process_alarms(self, alarm_ind):
+        try:
+            self.log.debug('alarm-indication', alarm=alarm_ind, device_id=self.device_id)
+            if alarm_ind.HasField('los_ind'):
+                self.los_indication(alarm_ind.los_ind)
+            elif alarm_ind.HasField('dying_gasp_ind'):
+                self.dying_gasp_indication(alarm_ind.dying_gasp_ind)
+            elif alarm_ind.HasField('onu_alarm_ind'):
+                self.onu_alarm_indication(alarm_ind.onu_alarm_ind)
+            elif alarm_ind.HasField('onu_startup_fail_ind'):
+                self.onu_startup_failure_indication(
+                    alarm_ind.onu_startup_fail_ind)
+            elif alarm_ind.HasField('onu_signal_degrade_ind'):
+                self.onu_signal_degrade_indication(
+                    alarm_ind.onu_signal_degrade_ind)
+            elif alarm_ind.HasField('onu_drift_of_window_ind'):
+                self.onu_drift_of_window_indication(
+                    alarm_ind.onu_drift_of_window_ind)
+            elif alarm_ind.HasField('onu_loss_omci_ind'):
+                self.onu_loss_omci_indication(alarm_ind.onu_loss_omci_ind)
+            elif alarm_ind.HasField('onu_signals_fail_ind'):
+                self.onu_signals_failure_indication(
+                    alarm_ind.onu_signals_fail_ind)
+            elif alarm_ind.HasField('onu_tiwi_ind'):
+                self.onu_transmission_interference_warning(
+                    alarm_ind.onu_tiwi_ind)
+            elif alarm_ind.HasField('onu_activation_fail_ind'):
+                self.onu_activation_failure_indication(
+                    alarm_ind.onu_activation_fail_ind)
+            elif alarm_ind.HasField('onu_processing_error_ind'):
+                self.onu_processing_error_indication(
+                    alarm_ind.onu_processing_error_ind)
+            else:
+                self.log.warn('unknown alarm type', alarm=alarm_ind)
+
+        except Exception as e:
+            self.log.error('failed-to-dispatch-alarm', error=e,
+                           alarm=alarm_ind)
+
+    def simulate_alarm(self, alarm):
+        self.simulator.simulate_alarm(alarm)
+
+    def los_indication(self, los_ind):
+
+        try:
+            self.log.debug('los indication received', los_ind=los_ind,
+                           int_id=los_ind.intf_id, status=los_ind.status)
+            try:
+                port_type_name = self.platform.intf_id_to_port_type_name(los_ind.intf_id)
+                if los_ind.status == 1 or los_ind.status == "on":
+                    # Zero out the suppression counter on OLT_LOS raise
+                    self.alarm_suppress['olt_los_clear'] = 0
+                    OltLosAlarm(self.alarms, intf_id=los_ind.intf_id, port_type_name=port_type_name).raise_alarm()
+                else:
+                    """
+                        Check if there has been more that one los clear following a previous los
+                    """
+                    if self.alarm_suppress['olt_los_clear'] == 0 and self.enable_alarm_suppress:
+                        OltLosAlarm(self.alarms, intf_id=los_ind.intf_id, port_type_name=port_type_name).clear_alarm()
+                        self.alarm_suppress['olt_los_clear'] += 1
+
+            except Exception as alarm_err:
+                self.log.error('los-indication', errmsg=alarm_err.message)
+        except Exception as e:
+            self.log.error('los-indication', errmsg=e.message)
+
+    def dying_gasp_indication(self, dying_gasp_ind):
+        try:
+            alarm_dgi = dying_gasp_ind
+            onu_id = alarm_dgi.onu_id
+            self.log.debug('openolt-alarmindication-dispatch-dying-gasp', int_id=alarm_dgi.intf_id,
+                           onu_id=alarm_dgi.onu_id, status=alarm_dgi.status)
+            try:
+                """
+                Get the specific onu device information for the onu generating the alarm.
+                Extract the id.   In the future extract the serial number as well
+                """
+                onu_device_id = "unresolved"
+                onu_serial_number = "unresolved"
+                onu_device = self.resolve_onu_id(onu_id, port_intf_id=alarm_dgi.intf_id)
+                if onu_device is not None:
+                    onu_device_id = onu_device.id
+                    onu_serial_number = onu_device.serial_number
+
+                if dying_gasp_ind.status == 1 or dying_gasp_ind.status == "on":
+                    OnuDyingGaspAlarm(self.alarms, dying_gasp_ind.intf_id,
+                                      onu_device_id).raise_alarm()
+                else:
+                    OnuDyingGaspAlarm(self.alarms, dying_gasp_ind.intf_id,
+                                      onu_device_id).clear_alarm()
+            except Exception as alarm_err:
+                self.log.exception('dying-gasp-indication', errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.error('dying_gasp_indication', error=e)
+
+    def onu_alarm_indication(self, onu_alarm_ind):
+        """
+        LOB = Loss of burst
+        LOPC = Loss of PLOAM channel
+
+        :param onu_alarm_ind:  Alarm indication which currently contains
+            onu_id:
+            los_status:
+            lob_status:
+            lopc_miss_status:
+            lopc_mic_error_status:
+        :return:
+        """
+        self.log.info('onu-alarm-indication')
+
+        try:
+            self.log.debug('onu alarm indication received', los_status=onu_alarm_ind.los_status,
+                           onu_id=onu_alarm_ind.onu_id,
+                           lob_status=onu_alarm_ind.lob_status,
+                           lopc_miss_status=onu_alarm_ind.lopc_miss_status,
+                           lopc_mic_error_status=onu_alarm_ind.lopc_mic_error_status,
+                           intf_id=onu_alarm_ind.intf_id
+                           )
+
+            try:
+                """
+                    Get the specific onu device information for the onu generating the alarm.
+                    Extract the id.   In the future extract the serial number as well
+                """
+                onu_device_id = "unresolved"
+                serial_number = "unresolved"
+                onu_device = self.resolve_onu_id(onu_alarm_ind.onu_id, port_intf_id=onu_alarm_ind.intf_id)
+                if onu_device is not None:
+                    onu_device_id = onu_device.id
+                    serial_number = onu_device.serial_number
+
+                if onu_alarm_ind.los_status == 1 or onu_alarm_ind.los_status == "on":
+                    OnuLosAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).raise_alarm()
+                elif onu_alarm_ind.los_status == 0 or onu_alarm_ind.los_status == "off":
+                    OnuLosAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).clear_alarm()
+                else:     # No Change
+                    pass
+
+                if onu_alarm_ind.lopc_miss_status == 1 or onu_alarm_ind.lopc_miss_status == "on":
+                    OnuLopcMissAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).raise_alarm()
+                elif (onu_alarm_ind.lopc_miss_status == 0 or onu_alarm_ind.lopc_miss_status == "off"):
+                    OnuLopcMissAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).clear_alarm()
+                else:     # No Change
+                    pass
+
+                if onu_alarm_ind.lopc_mic_error_status == 1 or onu_alarm_ind.lopc_mic_error_status == "on":
+                    OnuLopcMicErrorAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).raise_alarm()
+                elif onu_alarm_ind.lopc_mic_error_status == 0 or onu_alarm_ind.lopc_mic_error_status == "off":
+                    OnuLopcMicErrorAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).clear_alarm()
+                else:     # No Change
+                    pass
+
+                if onu_alarm_ind.lob_status == 1 or onu_alarm_ind.lob_status == "on":
+                    OnuLobAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).raise_alarm()
+                elif onu_alarm_ind.lob_status == 0 or onu_alarm_ind.lob_status == "off":
+                    OnuLobAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).clear_alarm()
+                else:     # No Change
+                    pass
+            except Exception as alarm_err:
+                self.log.exception('onu-alarm-indication', errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.exception('onu-alarm-indication', errmsg=e.message)
+
+    def onu_startup_failure_indication(self, onu_startup_fail_ind):
+        """
+        Current protobuf indicator:
+        message OnuStartupFailureIndication {
+                fixed32 intf_id = 1;
+                fixed32 onu_id = 2;
+                string status = 3;
+            }
+
+        :param onu_startup_fail_ind:
+        :return:
+        """
+        try:
+            ind = onu_startup_fail_ind
+            label = "onu-startup-failure-indication"
+            self.log.debug(label + " received", onu_startup_fail_ind=ind, int_id=ind.intf_id, onu_id=ind.onu_id, status=ind.status)
+            try:
+                if ind.status == 1 or ind.status == "on":
+                    OnuStartupAlarm(self.alarms, intf_id=ind.intf_id, onu_id=ind.onu_id).raise_alarm()
+                else:
+                    OnuStartupAlarm(self.alarms, intf_id=ind.intf_id, onu_id=ind.onu_id).clear_alarm()
+            except Exception as alarm_err:
+                self.log.exception(label, errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.exception(label, errmsg=e.message)
+
+    def onu_signal_degrade_indication(self, onu_signal_degrade_ind):
+        """
+        Current protobuf indicator:
+        OnuSignalDegradeIndication {
+            fixed32 intf_id = 1;
+            fixed32 onu_id = 2;
+            string status = 3;
+            fixed32 inverse_bit_error_rate = 4;
+        }
+        :param onu_signal_degrade_ind:
+        :return:
+        """
+        try:
+            ind = onu_signal_degrade_ind
+            label = "onu-signal-degrade-indication"
+            self.log.debug(label + ' received',
+                           onu_signal_degrade_ind=ind,
+                           int_id=ind.intf_id,
+                           onu_id=ind.onu_id,
+                           inverse_bit_error_rate=ind.inverse_bit_error_rate,
+                           status=ind.status)
+            try:
+                if ind.status == 1 or ind.status == "on":
+                    OnuSignalDegradeAlarm(self.alarms, intf_id=ind.intf_id, onu_id=ind.onu_id,
+                                          inverse_bit_error_rate=ind.inverse_bit_error_rate).raise_alarm()
+                else:
+                    OnuSignalDegradeAlarm(self.alarms, intf_id=ind.intf_id, onu_id=ind.onu_id,
+                                          inverse_bit_error_rate=ind.inverse_bit_error_rate).clear_alarm()
+            except Exception as alarm_err:
+                self.log.exception(label, errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.exception(label, errmsg=e.message)
+
+    def onu_drift_of_window_indication(self, onu_drift_of_window_ind):
+        """
+        Current protobuf indicator:
+        OnuDriftOfWindowIndication {
+            fixed32 intf_id = 1;
+            fixed32 onu_id = 2;
+            string status = 3;
+            fixed32 drift = 4;
+            fixed32 new_eqd = 5;
+        }
+
+        :param onu_drift_of_window_ind:
+        :return:
+        """
+        try:
+            ind = onu_drift_of_window_ind
+            label = "onu-window-drift-indication"
+
+            onu_device_id, onu_serial_number = self.resolve_onudev_id_onudev_serialnum(
+                self.resolve_onu_id(ind.onu_id, port_intf_id=ind.intf_id))
+
+            self.log.debug(label + ' received',
+                           onu_drift_of_window_ind=ind,
+                           int_id=ind.intf_id,
+                           onu_id=ind.onu_id,
+                           onu_device_id=onu_device_id,
+                           drift=ind.drift,
+                           new_eqd=ind.new_eqd,
+                           status=ind.status)
+            try:
+                if ind.status == 1 or ind.status == "on":
+                    OnuWindowDriftAlarm(self.alarms, intf_id=ind.intf_id,
+                           onu_id=onu_device_id,
+                           drift=ind.drift,
+                           new_eqd=ind.new_eqd).raise_alarm()
+                else:
+                    OnuWindowDriftAlarm(self.alarms, intf_id=ind.intf_id,
+                           onu_id=onu_device_id,
+                           drift=ind.drift,
+                           new_eqd=ind.new_eqd).clear_alarm()
+            except Exception as alarm_err:
+                self.log.exception(label, errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.exception(label, errmsg=e.message)
+
+    def onu_loss_omci_indication(self, onu_loss_omci_ind):
+        self.log.info('not implemented yet')
+
+    def onu_signals_failure_indication(self, onu_signals_fail_ind):
+        """
+        Current protobuf indicator:
+        OnuSignalsFailureIndication {
+            fixed32 intf_id = 1;
+            fixed32 onu_id = 2;
+            string status = 3;
+            fixed32 inverse_bit_error_rate = 4;
+        }
+
+        :param onu_signals_fail_ind:
+        :return:
+        """
+        try:
+            ind = onu_signals_fail_ind
+            label = "onu-signal-failure-indication"
+
+            onu_device_id, onu_serial_number = self.resolve_onudev_id_onudev_serialnum(
+                self.resolve_onu_id(ind.onu_id, port_intf_id=ind.intf_id))
+
+            self.log.debug(label + ' received',
+                           onu_signals_fail_ind=ind,
+                           int_id=ind.intf_id,
+                           onu_id=ind.onu_id,
+                           onu_device_id=onu_device_id,
+                           onu_serial_number=onu_serial_number,
+                           inverse_bit_error_rate=ind.inverse_bit_error_rate,
+                           status=ind.status)
+            try:
+                if ind.status == 1 or ind.status == "on":
+                    OnuSignalFailAlarm(self.alarms, intf_id=ind.intf_id,
+                           onu_id=onu_device_id,
+                           inverse_bit_error_rate=ind.inverse_bit_error_rate).raise_alarm()
+                else:
+                    OnuSignalFailAlarm(self.alarms, intf_id=ind.intf_id,
+                           onu_id=onu_device_id,
+                           inverse_bit_error_rate=ind.inverse_bit_error_rate).clear_alarm()
+            except Exception as alarm_err:
+                self.log.exception(label, errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.exception(label, errmsg=e.message)
+
+
+    def onu_transmission_interference_warning(self, onu_tiwi_ind):
+        self.log.info('not implemented yet')
+
+    def onu_activation_failure_indication(self, onu_activation_fail_ind):
+        """
+
+        No status is currently passed with this alarm. Consequently it will always just raise
+        :param onu_activation_fail_ind:
+        :return:
+        """
+        try:
+            ind = onu_activation_fail_ind
+            label = "onu-activation-failure-indication"
+
+            onu_device_id, onu_serial_number = self.resolve_onudev_id_onudev_serialnum(
+                self.resolve_onu_id(ind.onu_id, port_intf_id=ind.intf_id))
+
+            self.log.debug(label + ' received',
+                           onu_activation_fail_ind=ind,
+                           int_id=ind.intf_id,
+                           onu_id=ind.onu_id,
+                           onu_device_id=onu_device_id,
+                           onu_serial_number=onu_serial_number)
+            try:
+
+                OnuActivationFailAlarm(self.alarms, intf_id=ind.intf_id,
+                       onu_id=onu_device_id).raise_alarm()
+            except Exception as alarm_err:
+                self.log.exception(label, errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.exception(label, errmsg=e.message)
+
+    def onu_processing_error_indication(self, onu_processing_error_ind):
+        self.log.info('not implemented yet')
+
+    """
+    Helper Methods
+    """
+
+    def resolve_onudev_id_onudev_serialnum(self, onu_device):
+        """
+        Convenience wrapper to resolve device_id and serial number
+        :param onu_device:
+        :return: tuple: onu_device_id, onu_serial_number
+        """
+        try:
+            onu_device_id = "unresolved"
+            onu_serial_number = "unresolved"
+            if onu_device is not None:
+                onu_device_id = onu_device.id
+                onu_serial_number = onu_device.serial_number
+        except Exception as err:
+            self.log.exception("openolt-alarms-resolve-onudev-id  ", errmsg=err.message)
+            raise Exception(err)
+        return onu_device_id, onu_serial_number
+
+    def resolve_onu_id(self, onu_id, port_intf_id):
+        """
+        Resolve the onu_device from the onu_id and port intf_id. Uses the
+        adapter agent to resolve this.
+
+        Returns None if not found. Caller will have to test for None and act accordingly.
+        :param onu_id:
+        :param port_intf_id:
+        :return:
+        """
+
+        try:
+            onu_device = None
+            onu_device = self.adapter_agent.get_child_device(
+                self.device_id,
+                parent_port_no=self.platform.intf_id_to_port_no(
+                    port_intf_id, device_pb2.Port.PON_OLT),
+                onu_id=onu_id)
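+            # accessing .id forces an AttributeError (logged below) when the
+            # lookup returned no device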
+            onu_device_id = onu_device.id
+        except Exception as inner:
+            self.log.exception('resolve-onu-id', errmsg=inner.message)
+
+        return onu_device
+
diff --git a/python/adapters/openolt/openolt_bw.py b/python/adapters/openolt/openolt_bw.py
new file mode 100644
index 0000000..7c70b78
--- /dev/null
+++ b/python/adapters/openolt/openolt_bw.py
@@ -0,0 +1,41 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+DEFAULT_ONU_BW_PROFILE = "default"
+DEFAULT_ONU_PIR = 1000000  # 1Gbps
+
+
+class OpenOltBW(object):
+
+    def __init__(self, log, proxy):
+        self.log = log
+        self.proxy = proxy
+
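+    # Resolve an ONU's peak information rate: try the per-ONU traffic
+    # descriptor profile first, then the default profile, and finally fall
+    # back to the hard-coded DEFAULT_ONU_PIR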
+    def pir(self, serial_number):
+        bw = 0
+        try:
+            bw = self.proxy.get(
+                '/traffic_descriptor_profiles/{}'.format(serial_number))
+        except KeyError:
+            self.log.debug('bandwidth not configured',
+                           serial_number=serial_number)
+            try:
+                bw = self.proxy.get('/traffic_descriptor_profiles/{}' \
+                                    .format(DEFAULT_ONU_BW_PROFILE))
+            except KeyError:
+                return DEFAULT_ONU_PIR
+
+        return bw.maximum_bandwidth
diff --git a/python/adapters/openolt/openolt_device.py b/python/adapters/openolt/openolt_device.py
new file mode 100644
index 0000000..2779dc6
--- /dev/null
+++ b/python/adapters/openolt/openolt_device.py
@@ -0,0 +1,1074 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import threading
+import binascii
+import grpc
+import socket
+import re
+import structlog
+from twisted.internet import reactor
+from scapy.layers.l2 import Ether, Dot1Q
+from transitions import Machine
+
+from python.protos.bbf_fiber_tcont_body_pb2 import TcontsConfigData
+from python.protos.bbf_fiber_gemport_body_pb2 import GemportsConfigData
+
+from python.adapters.extensions.alarms.onu.onu_discovery_alarm import OnuDiscoveryAlarm
+
+from python.common.utils.nethelpers import mac_str_to_tuple
+from python.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+    OFPPS_LINK_DOWN, OFPPF_1GB_FD, \
+    OFPC_GROUP_STATS, OFPC_PORT_STATS, OFPC_TABLE_STATS, OFPC_FLOW_STATS, \
+    ofp_switch_features, ofp_port, ofp_port_stats, ofp_desc
+from python.common.utils.registry import registry
+from python.protos import openolt_pb2
+# NOTE: assumes the gRPC service stubs are generated alongside openolt_pb2;
+# OpenoltStub is used below in do_state_connected
+from python.protos import openolt_pb2_grpc
+from python.protos import third_party
+from python.protos.common_pb2 import AdminStatus, OperStatus, ConnectStatus
+from python.protos.common_pb2 import LogLevel
+from python.protos.device_pb2 import Port, Device
+
+from python.protos.logical_device_pb2 import LogicalDevice, LogicalPort
+
+class OpenoltDevice(object):
+    """
+    OpenoltDevice state machine:
+
+        null ----> init ------> connected -----> up -----> down
+                   ^ ^             |             ^         | |
+                   | |             |             |         | |
+                   | +-------------+             +---------+ |
+                   |                                         |
+                   +-----------------------------------------+
+    """
+    # pylint: disable=too-many-instance-attributes
+    # pylint: disable=R0904
+    states = [
+        'state_null',
+        'state_init',
+        'state_connected',
+        'state_up',
+        'state_down']
+
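+    # Transition table for the Machine below; each 'before' callback runs
+    # ahead of the state change and each 'after' callback once it completes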
+    transitions = [
+        {'trigger': 'go_state_init',
+         'source': ['state_null', 'state_connected', 'state_down'],
+         'dest': 'state_init',
+         'before': 'do_state_init',
+         'after': 'post_init'},
+        {'trigger': 'go_state_connected',
+         'source': 'state_init',
+         'dest': 'state_connected',
+         'before': 'do_state_connected'},
+        {'trigger': 'go_state_up',
+         'source': ['state_connected', 'state_down'],
+         'dest': 'state_up',
+         'before': 'do_state_up'},
+        {'trigger': 'go_state_down',
+         'source': ['state_up'],
+         'dest': 'state_down',
+         'before': 'do_state_down',
+         'after': 'post_down'}]
+
+    def __init__(self, **kwargs):
+        super(OpenoltDevice, self).__init__()
+
+        self.adapter_agent = kwargs['adapter_agent']
+        self.device_num = kwargs['device_num']
+        device = kwargs['device']
+
+        self.platform_class = kwargs['support_classes']['platform']
+        self.resource_mgr_class = kwargs['support_classes']['resource_mgr']
+        self.flow_mgr_class = kwargs['support_classes']['flow_mgr']
+        self.alarm_mgr_class = kwargs['support_classes']['alarm_mgr']
+        self.stats_mgr_class = kwargs['support_classes']['stats_mgr']
+        self.bw_mgr_class = kwargs['support_classes']['bw_mgr']
+
+        is_reconciliation = kwargs.get('reconciliation', False)
+        self.device_id = device.id
+        self.host_and_port = device.host_and_port
+        self.extra_args = device.extra_args
+        self.log = structlog.get_logger(id=self.device_id,
+                                        ip=self.host_and_port)
+        self.proxy = registry('core').get_proxy('/')
+
+        self.log.info('openolt-device-init')
+
+        # default device id and device serial number. If device_info provides better results, they will be updated
+        self.dpid = kwargs.get('dp_id')
+        self.serial_number = self.host_and_port  # FIXME
+
+        # Device already set in the event of reconciliation
+        if not is_reconciliation:
+            self.log.info('updating-device')
+            # It is a new device
+            # Update device
+            device.root = True
+            device.connect_status = ConnectStatus.UNREACHABLE
+            device.oper_status = OperStatus.ACTIVATING
+            self.adapter_agent.update_device(device)
+
+        # If logical device does exist use it, else create one after connecting to device
+        if device.parent_id:
+            # logical device already exists
+            self.logical_device_id = device.parent_id
+            if is_reconciliation:
+                self.adapter_agent.reconcile_logical_device(
+                    self.logical_device_id)
+
+        # Initialize the OLT state machine
+        self.machine = Machine(model=self, states=OpenoltDevice.states,
+                               transitions=OpenoltDevice.transitions,
+                               send_event=True, initial='state_null')
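+        # Kick off the machine: null -> init fires do_state_init and post_init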
+        self.go_state_init()
+
+    def create_logical_device(self, device_info):
+        dpid = device_info.device_id
+        serial_number = device_info.device_serial_number
+
+        if dpid is None:
+            dpid = self.dpid
+        if serial_number is None:
+            serial_number = self.serial_number
+
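+        # No usable dpid reported by the device: derive one from the
+        # management IP (hex-encoded) or, failing that, from the hostname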
+        if dpid is None or dpid == '':
+            uri = self.host_and_port.split(":")[0]
+            try:
+                socket.inet_pton(socket.AF_INET, uri)
+                dpid = '00:00:' + self.ip_hex(uri)
+            except socket.error:
+                # this is not an IP
+                dpid = self.stringToMacAddr(uri)
+
+        if serial_number is None or serial_number == '':
+            serial_number = self.host_and_port
+
+        self.log.info('creating-openolt-logical-device', dp_id=dpid, serial_number=serial_number)
+
+        mfr_desc = device_info.vendor
+        sw_desc = device_info.firmware_version
+        hw_desc = device_info.model
+        if device_info.hardware_version:
+            hw_desc += '-' + device_info.hardware_version
+
+        # Create logical OF device
+        ld = LogicalDevice(
+            root_device_id=self.device_id,
+            switch_features=ofp_switch_features(
+                n_buffers=256,  # TODO fake for now
+                n_tables=2,  # TODO ditto
+                capabilities=(  # TODO and ditto
+                        OFPC_FLOW_STATS
+                        | OFPC_TABLE_STATS
+                        | OFPC_PORT_STATS
+                        | OFPC_GROUP_STATS
+                )
+            ),
+            desc=ofp_desc(
+                mfr_desc=mfr_desc,
+                hw_desc=hw_desc,
+                sw_desc=sw_desc,
+                serial_num=serial_number
+            )
+        )
+        ld_init = self.adapter_agent.create_logical_device(ld,
+                                                           dpid=dpid)
+
+        self.logical_device_id = ld_init.id
+
+        device = self.adapter_agent.get_device(self.device_id)
+        device.serial_number = serial_number
+        self.adapter_agent.update_device(device)
+
+        self.dpid = dpid
+        self.serial_number = serial_number
+
+        self.log.info('created-openolt-logical-device', logical_device_id=ld_init.id)
+
+    def stringToMacAddr(self, uri):
+        regex = re.compile('[^a-zA-Z]')
+        uri = regex.sub('', uri)
+
+        l = len(uri)
+        if l > 6:
+            uri = uri[0:6]
+        else:
+            uri = uri + uri[0:6 - l]
+
+
+        return ":".join([hex(ord(x))[-2:] for x in uri])
+
+    def do_state_init(self, event):
+        # Initialize gRPC
+        self.channel = grpc.insecure_channel(self.host_and_port)
+        self.channel_ready_future = grpc.channel_ready_future(self.channel)
+
+        self.log.info('openolt-device-created', device_id=self.device_id)
+
+    def post_init(self, event):
+        self.log.debug('post_init')
+
+        # We have reached init state, starting the indications thread
+
+        # Catch RuntimeError exception
+        try:
+            # Start indications thread
+            self.indications_thread_handle = threading.Thread(
+                target=self.indications_thread)
+            # Set the daemon attribute directly instead of the old
+            # setDaemon() getter/setter API, so the thread does not block
+            # interpreter shutdown; otherwise Jenkins runs can fail with
+            # "Exception in thread Thread-1 (most likely raised during
+            # interpreter shutdown)"
+            self.indications_thread_handle.daemon = True
+            self.indications_thread_handle.start()
+        except Exception as e:
+            self.log.exception('post_init failed', e=e)
+
+    def do_state_connected(self, event):
+        self.log.debug("do_state_connected")
+
+        device = self.adapter_agent.get_device(self.device_id)
+
+        self.stub = openolt_pb2_grpc.OpenoltStub(self.channel)
+
+        device_info = self.stub.GetDeviceInfo(openolt_pb2.Empty())
+        self.log.info('Device connected', device_info=device_info)
+
+        self.create_logical_device(device_info)
+
+        device.serial_number = self.serial_number
+
+        self.resource_mgr = self.resource_mgr_class(self.device_id,
+                                                    self.host_and_port,
+                                                    self.extra_args,
+                                                    device_info)
+        self.platform = self.platform_class(self.log, self.resource_mgr)
+        self.flow_mgr = self.flow_mgr_class(self.adapter_agent, self.log,
+                                            self.stub, self.device_id,
+                                            self.logical_device_id,
+                                            self.platform, self.resource_mgr)
+
+        self.alarm_mgr = self.alarm_mgr_class(self.log, self.adapter_agent,
+                                              self.device_id,
+                                              self.logical_device_id,
+                                              self.platform)
+        self.stats_mgr = self.stats_mgr_class(self, self.log, self.platform)
+        self.bw_mgr = self.bw_mgr_class(self.log, self.proxy)
+
+        device.vendor = device_info.vendor
+        device.model = device_info.model
+        device.hardware_version = device_info.hardware_version
+        device.firmware_version = device_info.firmware_version
+
+        # TODO: check for uptime and reboot if too long (VOL-1192)
+
+        device.connect_status = ConnectStatus.REACHABLE
+        self.adapter_agent.update_device(device)
+
+    def do_state_up(self, event):
+        self.log.debug("do_state_up")
+
+        device = self.adapter_agent.get_device(self.device_id)
+
+        # Update phys OF device
+        device.parent_id = self.logical_device_id
+        device.oper_status = OperStatus.ACTIVE
+        self.adapter_agent.update_device(device)
+
+    def do_state_down(self, event):
+        self.log.debug("do_state_down")
+        oper_state = OperStatus.UNKNOWN
+        connect_state = ConnectStatus.UNREACHABLE
+
+        # Propagating to the children
+
+        # Children ports
+        child_devices = self.adapter_agent.get_child_devices(self.device_id)
+        for onu_device in child_devices:
+            onu_adapter_agent = \
+                registry('adapter_loader').get_agent(onu_device.adapter)
+            onu_adapter_agent.update_interface(onu_device,
+                                               {'oper_state': 'down'})
+            self.onu_ports_down(onu_device, oper_state)
+
+        # Children devices
+        self.adapter_agent.update_child_devices_state(
+            self.device_id, oper_status=oper_state,
+            connect_status=connect_state)
+        # Device Ports
+        device_ports = self.adapter_agent.get_ports(self.device_id,
+                                                    Port.ETHERNET_NNI)
+        logical_ports_ids = [port.label for port in device_ports]
+        device_ports += self.adapter_agent.get_ports(self.device_id,
+                                                     Port.PON_OLT)
+
+        for port in device_ports:
+            port.oper_status = oper_state
+            self.adapter_agent.add_port(self.device_id, port)
+
+        # Device logical port
+        for logical_port_id in logical_ports_ids:
+            logical_port = self.adapter_agent.get_logical_port(
+                self.logical_device_id, logical_port_id)
+            logical_port.ofp_port.state = OFPPS_LINK_DOWN
+            self.adapter_agent.update_logical_port(self.logical_device_id,
+                                                   logical_port)
+
+        # Device
+        device = self.adapter_agent.get_device(self.device_id)
+        device.oper_status = oper_state
+        device.connect_status = connect_state
+
+        reactor.callLater(2, self.adapter_agent.update_device, device)
+
+    # def post_up(self, event):
+    #     self.log.debug('post-up')
+    #     self.flow_mgr.reseed_flows()
+
+    def post_down(self, event):
+        self.log.debug('post_down')
+        self.flow_mgr.reset_flows()
+
+    def indications_thread(self):
+        self.log.debug('starting-indications-thread')
+        self.log.debug('connecting to olt', device_id=self.device_id)
+        self.channel_ready_future.result()  # blocking call
+        self.log.info('connected to olt', device_id=self.device_id)
+        self.go_state_connected()
+
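+        # EnableIndication opens a server-streaming gRPC call; the returned
+        # iterator blocks in next() until the OLT sends the next indication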
+        self.indications = self.stub.EnableIndication(openolt_pb2.Empty())
+
+        while True:
+            try:
+                # get the next indication from olt
+                ind = next(self.indications)
+            except Exception as e:
+                self.log.warn('gRPC connection lost', error=e)
+                reactor.callFromThread(self.go_state_down)
+                reactor.callFromThread(self.go_state_init)
+                break
+            else:
+                self.log.debug("rx indication", indication=ind)
+
+                # indication handlers run in the main event loop
+                if ind.HasField('olt_ind'):
+                    reactor.callFromThread(self.olt_indication, ind.olt_ind)
+                elif ind.HasField('intf_ind'):
+                    reactor.callFromThread(self.intf_indication, ind.intf_ind)
+                elif ind.HasField('intf_oper_ind'):
+                    reactor.callFromThread(self.intf_oper_indication,
+                                           ind.intf_oper_ind)
+                elif ind.HasField('onu_disc_ind'):
+                    reactor.callFromThread(self.onu_discovery_indication,
+                                           ind.onu_disc_ind)
+                elif ind.HasField('onu_ind'):
+                    reactor.callFromThread(self.onu_indication, ind.onu_ind)
+                elif ind.HasField('omci_ind'):
+                    reactor.callFromThread(self.omci_indication, ind.omci_ind)
+                elif ind.HasField('pkt_ind'):
+                    reactor.callFromThread(self.packet_indication, ind.pkt_ind)
+                elif ind.HasField('port_stats'):
+                    reactor.callFromThread(
+                        self.stats_mgr.port_statistics_indication,
+                        ind.port_stats)
+                elif ind.HasField('flow_stats'):
+                    reactor.callFromThread(
+                        self.stats_mgr.flow_statistics_indication,
+                        ind.flow_stats)
+                elif ind.HasField('alarm_ind'):
+                    reactor.callFromThread(self.alarm_mgr.process_alarms,
+                                           ind.alarm_ind)
+                else:
+                    self.log.warn('unknown indication type')
+
+    def olt_indication(self, olt_indication):
+        if olt_indication.oper_state == "up":
+            self.go_state_up()
+        elif olt_indication.oper_state == "down":
+            self.go_state_down()
+
+    def intf_indication(self, intf_indication):
+        self.log.debug("intf indication", intf_id=intf_indication.intf_id,
+                       oper_state=intf_indication.oper_state)
+
+        if intf_indication.oper_state == "up":
+            oper_status = OperStatus.ACTIVE
+        else:
+            oper_status = OperStatus.DISCOVERED
+
+        # add_port updates the port if it already exists
+        self.add_port(intf_indication.intf_id, Port.PON_OLT, oper_status)
+
+    def intf_oper_indication(self, intf_oper_indication):
+        self.log.debug("Received interface oper state change indication",
+                       intf_id=intf_oper_indication.intf_id,
+                       type=intf_oper_indication.type,
+                       oper_state=intf_oper_indication.oper_state)
+
+        if intf_oper_indication.oper_state == "up":
+            oper_state = OperStatus.ACTIVE
+        else:
+            oper_state = OperStatus.DISCOVERED
+
+        if intf_oper_indication.type == "nni":
+
+            # add_(logical_)port updates the port if it already exists
+            port_no, label = self.add_port(intf_oper_indication.intf_id,
+                                           Port.ETHERNET_NNI, oper_state)
+            self.log.debug("int_oper_indication", port_no=port_no, label=label)
+            self.add_logical_port(port_no, intf_oper_indication.intf_id,
+                                  oper_state)
+
+        elif intf_oper_indication.type == "pon":
+            # FIXME - handle PON oper state change
+            pass
+
+    def onu_discovery_indication(self, onu_disc_indication):
+        intf_id = onu_disc_indication.intf_id
+        serial_number = onu_disc_indication.serial_number
+
+        serial_number_str = self.stringify_serial_number(serial_number)
+
+        self.log.debug("onu discovery indication", intf_id=intf_id,
+                       serial_number=serial_number_str)
+
+        # Post ONU Discovery alarm
+        try:
+            OnuDiscoveryAlarm(self.alarm_mgr.alarms, pon_id=intf_id,
+                              serial_number=serial_number_str).raise_alarm()
+        except Exception as disc_alarm_error:
+            self.log.exception("onu-discovery-alarm-error",
+                               errmsg=disc_alarm_error.message)
+            # continue for now.
+
+        onu_device = self.adapter_agent.get_child_device(
+            self.device_id,
+            serial_number=serial_number_str)
+
+        if onu_device is None:
+            try:
+                onu_id = self.resource_mgr.get_onu_id(intf_id)
+                if onu_id is None:
+                    raise Exception("onu-id-unavailable")
+
+                self.add_onu_device(
+                    intf_id,
+                    self.platform.intf_id_to_port_no(intf_id, Port.PON_OLT),
+                    onu_id, serial_number)
+                self.activate_onu(intf_id, onu_id, serial_number,
+                                  serial_number_str)
+            except Exception as e:
+                self.log.exception('onu-activation-failed', e=e)
+
+        else:
+            if onu_device.connect_status != ConnectStatus.REACHABLE:
+                onu_device.connect_status = ConnectStatus.REACHABLE
+                self.adapter_agent.update_device(onu_device)
+
+            onu_id = onu_device.proxy_address.onu_id
+            if onu_device.oper_status == OperStatus.DISCOVERED \
+                    or onu_device.oper_status == OperStatus.ACTIVATING:
+                self.log.debug("ignore onu discovery indication, \
+                               the onu has been discovered and should be \
+                               activating shorlty", intf_id=intf_id,
+                               onu_id=onu_id, state=onu_device.oper_status)
+            elif onu_device.oper_status == OperStatus.ACTIVE:
+                self.log.warn("onu discovery indication whereas onu is \
+                              supposed to be active",
+                              intf_id=intf_id, onu_id=onu_id,
+                              state=onu_device.oper_status)
+            elif onu_device.oper_status == OperStatus.UNKNOWN:
+                self.log.info("onu in unknown state, recovering from olt \
+                              reboot probably, activate onu", intf_id=intf_id,
+                              onu_id=onu_id, serial_number=serial_number_str)
+
+                onu_device.oper_status = OperStatus.DISCOVERED
+                self.adapter_agent.update_device(onu_device)
+                try:
+                    self.activate_onu(intf_id, onu_id, serial_number,
+                                      serial_number_str)
+                except Exception as e:
+                    self.log.error('onu-activation-error',
+                                   serial_number=serial_number_str, error=e)
+            else:
+                self.log.warn('unexpected state', onu_id=onu_id,
+                              onu_device_oper_state=onu_device.oper_status)
+
+    def onu_indication(self, onu_indication):
+        self.log.debug("onu indication", intf_id=onu_indication.intf_id,
+                       onu_id=onu_indication.onu_id,
+                       serial_number=onu_indication.serial_number,
+                       oper_state=onu_indication.oper_state,
+                       admin_state=onu_indication.admin_state)
+        try:
+            serial_number_str = self.stringify_serial_number(
+                onu_indication.serial_number)
+        except Exception:
+            serial_number_str = None
+
+        if serial_number_str is not None:
+            onu_device = self.adapter_agent.get_child_device(
+                self.device_id,
+                serial_number=serial_number_str)
+        else:
+            onu_device = self.adapter_agent.get_child_device(
+                self.device_id,
+                parent_port_no=self.platform.intf_id_to_port_no(
+                    onu_indication.intf_id, Port.PON_OLT),
+                onu_id=onu_indication.onu_id)
+
+        if onu_device is None:
+            self.log.error('onu not found', intf_id=onu_indication.intf_id,
+                           onu_id=onu_indication.onu_id)
+            return
+
+        if self.platform.intf_id_from_pon_port_no(onu_device.parent_port_no) \
+                != onu_indication.intf_id:
+            self.log.warn('ONU-is-on-a-different-intf-id-now',
+                          previous_intf_id=self.platform.intf_id_from_pon_port_no(
+                              onu_device.parent_port_no),
+                          current_intf_id=onu_indication.intf_id)
+            # FIXME - handle intf_id mismatch (ONU move?)
+
+        if onu_device.proxy_address.onu_id != onu_indication.onu_id:
+            # FIXME - handle onu id mismatch
+            self.log.warn('ONU-id-mismatch, can happen if both voltha and '
+                          'the olt rebooted',
+                          expected_onu_id=onu_device.proxy_address.onu_id,
+                          received_onu_id=onu_indication.onu_id)
+
+        # Admin state
+        if onu_indication.admin_state == 'down':
+            if onu_indication.oper_state != 'down':
+                self.log.error('ONU-admin-state-down-and-oper-status-not-down',
+                               oper_state=onu_indication.oper_state)
+                # Forcing the oper state change code to execute
+                onu_indication.oper_state = 'down'
+
+            # Port and logical port update is taken care of by oper state block
+
+        elif onu_indication.admin_state == 'up':
+            pass
+
+        else:
+            self.log.warn('Invalid-or-not-implemented-admin-state',
+                          received_admin_state=onu_indication.admin_state)
+
+        self.log.debug('admin-state-dealt-with')
+
+        onu_adapter_agent = \
+            registry('adapter_loader').get_agent(onu_device.adapter)
+        if onu_adapter_agent is None:
+            self.log.error('onu_adapter_agent-could-not-be-retrieved',
+                           onu_device=onu_device)
+            return
+
+        # Operating state
+        if onu_indication.oper_state == 'down':
+
+            if onu_device.connect_status != ConnectStatus.UNREACHABLE:
+                onu_device.connect_status = ConnectStatus.UNREACHABLE
+                self.adapter_agent.update_device(onu_device)
+
+            # Move to discovered state
+            self.log.debug('onu-oper-state-is-down')
+
+            if onu_device.oper_status != OperStatus.DISCOVERED:
+                onu_device.oper_status = OperStatus.DISCOVERED
+                self.adapter_agent.update_device(onu_device)
+            # Set port oper state to Discovered
+            self.onu_ports_down(onu_device, OperStatus.DISCOVERED)
+
+            onu_adapter_agent.update_interface(onu_device,
+                                               {'oper_state': 'down'})
+
+        elif onu_indication.oper_state == 'up':
+
+            if onu_device.connect_status != ConnectStatus.REACHABLE:
+                onu_device.connect_status = ConnectStatus.REACHABLE
+                self.adapter_agent.update_device(onu_device)
+
+            if onu_device.oper_status != OperStatus.DISCOVERED:
+                self.log.debug("ignore onu indication",
+                               intf_id=onu_indication.intf_id,
+                               onu_id=onu_indication.onu_id,
+                               state=onu_device.oper_status,
+                               msg_oper_state=onu_indication.oper_state)
+                return
+
+            # Device was in Discovered state, setting it to active
+
+            # Prepare onu configuration
+
+            onu_adapter_agent.create_interface(onu_device, onu_indication)
+
+        else:
+            self.log.warn('Not-implemented-or-invalid-value-of-oper-state',
+                          oper_state=onu_indication.oper_state)
+
+    def onu_ports_down(self, onu_device, oper_state):
+        # Set port oper state to Discovered
+        # add port will update port if it exists
+        # self.adapter_agent.add_port(
+        #    self.device_id,
+        #    Port(
+        #        port_no=uni_no,
+        #        label=uni_name,
+        #        type=Port.ETHERNET_UNI,
+        #        admin_state=onu_device.admin_state,
+        #        oper_status=oper_state))
+        # TODO this should be downing the ports in the onu adapter
+
+        # Disable logical port
+        onu_ports = self.proxy.get('devices/{}/ports'.format(onu_device.id))
+        for onu_port in onu_ports:
+            self.log.debug('onu-ports-down', onu_port=onu_port)
+            onu_port_id = onu_port.label
+            try:
+                onu_logical_port = self.adapter_agent.get_logical_port(
+                    logical_device_id=self.logical_device_id, port_id=onu_port_id)
+                onu_logical_port.ofp_port.state = OFPPS_LINK_DOWN
+                self.adapter_agent.update_logical_port(
+                    logical_device_id=self.logical_device_id,
+                    port=onu_logical_port)
+                self.log.debug('cascading-oper-state-to-port-and-logical-port')
+            except KeyError as e:
+                self.log.error('matching-onu-port-label-invalid',
+                               onu_id=onu_device.id, olt_id=self.device_id,
+                               onu_ports=onu_ports, onu_port_id=onu_port_id,
+                               error=e)
+
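+    # OMCI responses from the ONU arrive on the OLT indication stream and
+    # are proxied up to the owning ONU adapter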
+    def omci_indication(self, omci_indication):
+
+        self.log.debug("omci indication", intf_id=omci_indication.intf_id,
+                       onu_id=omci_indication.onu_id)
+
+        onu_device = self.adapter_agent.get_child_device(
+            self.device_id, onu_id=omci_indication.onu_id,
+            parent_port_no=self.platform.intf_id_to_port_no(
+                omci_indication.intf_id, Port.PON_OLT), )
+
+        self.adapter_agent.receive_proxied_message(onu_device.proxy_address,
+                                                   omci_indication.pkt)
+
+    def packet_indication(self, pkt_indication):
+
+        self.log.debug("packet indication",
+                       intf_type=pkt_indication.intf_type,
+                       intf_id=pkt_indication.intf_id,
+                       port_no=pkt_indication.port_no,
+                       cookie=pkt_indication.cookie,
+                       gemport_id=pkt_indication.gemport_id,
+                       flow_id=pkt_indication.flow_id)
+
+        if pkt_indication.intf_type == "pon":
+            if pkt_indication.port_no:
+                logical_port_num = pkt_indication.port_no
+            else:  # TODO Remove this else block after openolt device has been fully rolled out with cookie protobuf change
+                try:
+                    onu_id_uni_id = self.resource_mgr.get_onu_uni_from_ponport_gemport(pkt_indication.intf_id,
+                                                                                       pkt_indication.gemport_id)
+                    onu_id = int(onu_id_uni_id[0])
+                    uni_id = int(onu_id_uni_id[1])
+                    self.log.debug("packet indication-kv", onu_id=onu_id, uni_id=uni_id)
+                    if onu_id is None:
+                        raise Exception("onu-id-none")
+                    if uni_id is None:
+                        raise Exception("uni-id-none")
+                    logical_port_num = self.platform.mk_uni_port_num(pkt_indication.intf_id, onu_id, uni_id)
+                except Exception as e:
+                    self.log.error("no-onu-reference-for-gem",
+                                   gemport_id=pkt_indication.gemport_id, e=e)
+                    return
+
+
+        elif pkt_indication.intf_type == "nni":
+            logical_port_num = self.platform.intf_id_to_port_no(
+                pkt_indication.intf_id,
+                Port.ETHERNET_NNI)
+
+        pkt = Ether(pkt_indication.pkt)
+
+        self.log.debug("packet indication",
+                       logical_device_id=self.logical_device_id,
+                       logical_port_no=logical_port_num)
+
+        self.adapter_agent.send_packet_in(
+            logical_device_id=self.logical_device_id,
+            logical_port_no=logical_port_num,
+            packet=str(pkt))
+
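+    # packet-out from the OF controller: UNI-bound frames are sent to the
+    # ONU (with the outer VLAN tag stripped when double-tagged), NNI-bound
+    # frames go out the uplink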
+    def packet_out(self, egress_port, msg):
+        pkt = Ether(msg)
+        self.log.debug('packet out', egress_port=egress_port,
+                       device_id=self.device_id,
+                       logical_device_id=self.logical_device_id,
+                       packet=str(pkt).encode("HEX"))
+
+        # Find port type
+        egress_port_type = self.platform.intf_id_to_port_type_name(egress_port)
+        if egress_port_type == Port.ETHERNET_UNI:
+
+            if pkt.haslayer(Dot1Q):
+                outer_shim = pkt.getlayer(Dot1Q)
+                if isinstance(outer_shim.payload, Dot1Q):
+                    # If double tag, remove the outer tag
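+                    # e.g. (illustrative) Ether/Dot1Q(vid=1000)/Dot1Q(vid=100)/IP
+                    # becomes Ether/Dot1Q(vid=100)/IP on its way to the ONU.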
+                    payload = (
+                            Ether(src=pkt.src, dst=pkt.dst, type=outer_shim.type) /
+                            outer_shim.payload
+                    )
+                else:
+                    payload = pkt
+            else:
+                payload = pkt
+
+            send_pkt = binascii.unhexlify(str(payload).encode("HEX"))
+
+            self.log.debug(
+                'sending-packet-to-ONU', egress_port=egress_port,
+                intf_id=self.platform.intf_id_from_uni_port_num(egress_port),
+                onu_id=self.platform.onu_id_from_port_num(egress_port),
+                uni_id=self.platform.uni_id_from_port_num(egress_port),
+                port_no=egress_port,
+                packet=str(payload).encode("HEX"))
+
+            onu_pkt = openolt_pb2.OnuPacket(
+                intf_id=self.platform.intf_id_from_uni_port_num(egress_port),
+                onu_id=self.platform.onu_id_from_port_num(egress_port),
+                port_no=egress_port,
+                pkt=send_pkt)
+
+            self.stub.OnuPacketOut(onu_pkt)
+
+        elif egress_port_type == Port.ETHERNET_NNI:
+            self.log.debug('sending-packet-to-uplink', egress_port=egress_port,
+                           packet=str(pkt).encode("HEX"))
+
+            send_pkt = binascii.unhexlify(str(pkt).encode("HEX"))
+
+            uplink_pkt = openolt_pb2.UplinkPacket(
+                intf_id=self.platform.intf_id_from_nni_port_num(egress_port),
+                pkt=send_pkt)
+
+            self.stub.UplinkPacketOut(uplink_pkt)
+
+        else:
+            self.log.warn('Packet-out-to-this-interface-type-not-implemented',
+                          egress_port=egress_port,
+                          port_type=egress_port_type)
+
+    def send_proxied_message(self, proxy_address, msg):
+        onu_device = self.adapter_agent.get_child_device(
+            self.device_id, onu_id=proxy_address.onu_id,
+            parent_port_no=self.platform.intf_id_to_port_no(
+                proxy_address.channel_id, Port.PON_OLT)
+        )
+        if onu_device.connect_status != ConnectStatus.REACHABLE:
+            self.log.debug('ONU is not reachable, cannot send OMCI',
+                           serial_number=onu_device.serial_number,
+                           intf_id=onu_device.proxy_address.channel_id,
+                           onu_id=onu_device.proxy_address.onu_id)
+            return
+        omci = openolt_pb2.OmciMsg(intf_id=proxy_address.channel_id,
+                                   onu_id=proxy_address.onu_id, pkt=str(msg))
+        self.stub.OmciMsgOut(omci)
+
+    def add_onu_device(self, intf_id, port_no, onu_id, serial_number):
+        self.log.info("Adding ONU", port_no=port_no, onu_id=onu_id,
+                      serial_number=serial_number)
+
+        # NOTE - channel_id of onu is set to intf_id
+        proxy_address = Device.ProxyAddress(device_id=self.device_id,
+                                            channel_id=intf_id, onu_id=onu_id,
+                                            onu_session_id=onu_id)
+
+        self.log.debug("Adding ONU", proxy_address=proxy_address)
+
+        serial_number_str = self.stringify_serial_number(serial_number)
+
+        self.adapter_agent.add_onu_device(
+            parent_device_id=self.device_id, parent_port_no=port_no,
+            vendor_id=serial_number.vendor_id, proxy_address=proxy_address,
+            root=True, serial_number=serial_number_str,
+            admin_state=AdminState.ENABLED  # , **{'vlan': 4091} magic still maps to brcm_openomci_onu.pon_port.BRDCM_DEFAULT_VLAN
+        )
+
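+    # Builds the textual port label; e.g. (illustrative) an NNI with
+    # port_no 128 yields 'nni-128', and PON intf_id 0 yields 'pon0'.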
+    def port_name(self, port_no, port_type, intf_id=None, serial_number=None):
+        if port_type == Port.ETHERNET_NNI:
+            return "nni-" + str(port_no)
+        elif port_type == Port.PON_OLT:
+            return "pon" + str(intf_id)
+        elif port_type == Port.ETHERNET_UNI:
+            assert False, 'local UNI management not supported'
+
+    def add_logical_port(self, port_no, intf_id, oper_state):
+        self.log.info('adding-logical-port', port_no=port_no)
+
+        label = self.port_name(port_no, Port.ETHERNET_NNI)
+
+        cap = OFPPF_1GB_FD | OFPPF_FIBER
+        curr_speed = OFPPF_1GB_FD
+        max_speed = OFPPF_1GB_FD
+
+        if oper_state == OperStatus.ACTIVE:
+            of_oper_state = OFPPS_LIVE
+        else:
+            of_oper_state = OFPPS_LINK_DOWN
+
+        ofp = ofp_port(
+            port_no=port_no,
+            hw_addr=mac_str_to_tuple(self._get_mac_from_port_no(port_no)),
+            name=label, config=0, state=of_oper_state, curr=cap,
+            advertised=cap, peer=cap, curr_speed=curr_speed,
+            max_speed=max_speed)
+
+        ofp_stats = ofp_port_stats(port_no=port_no)
+
+        logical_port = LogicalPort(
+            id=label, ofp_port=ofp, device_id=self.device_id,
+            device_port_no=port_no, root_port=True,
+            ofp_port_stats=ofp_stats)
+
+        self.adapter_agent.add_logical_port(self.logical_device_id,
+                                            logical_port)
+
+    def _get_mac_from_port_no(self, port_no):
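+        # Derives a synthetic MAC address from the OpenFlow port number;
+        # e.g. (illustrative) port_no 0x00010203 yields '00:00:00:01:02:03'.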
+        mac = ''
+        for i in range(4):
+            mac = ':%02x' % ((port_no >> (i * 8)) & 0xff) + mac
+        return '00:00' + mac
+
+    def add_port(self, intf_id, port_type, oper_status):
+        port_no = self.platform.intf_id_to_port_no(intf_id, port_type)
+
+        label = self.port_name(port_no, port_type, intf_id)
+
+        self.log.debug('adding-port', port_no=port_no, label=label,
+                       port_type=port_type)
+
+        port = Port(port_no=port_no, label=label, type=port_type,
+                    admin_state=AdminState.ENABLED, oper_status=oper_status)
+
+        self.adapter_agent.add_port(self.device_id, port)
+
+        return port_no, label
+
+    def delete_logical_port(self, child_device):
+        logical_ports = self.proxy.get('/logical_devices/{}/ports'.format(
+            self.logical_device_id))
+        for logical_port in logical_ports:
+            if logical_port.device_id == child_device.id:
+                self.log.debug('delete-logical-port',
+                               onu_device_id=child_device.id,
+                               logical_port=logical_port)
+                self.flow_mgr.clear_flows_and_scheduler_for_logical_port(
+                    child_device, logical_port)
+                self.adapter_agent.delete_logical_port(
+                    self.logical_device_id, logical_port)
+                return
+
+    def delete_port(self, child_serial_number):
+        ports = self.proxy.get('/devices/{}/ports'.format(
+            self.device_id))
+        for port in ports:
+            if port.label == child_serial_number:
+                self.log.debug('delete-port',
+                               onu_serial_number=child_serial_number,
+                               port=port)
+                self.adapter_agent.delete_port(self.device_id, port)
+                return
+
+    def update_flow_table(self, flows):
+        self.log.debug('No updates here now, all is done in logical flows '
+                       'update')
+
+    def update_logical_flows(self, flows_to_add, flows_to_remove,
+                             device_rules_map):
+        if not self.is_state_up():
+            self.log.info('The OLT is not up, we cannot update flows',
+                          flows_to_add=[f.id for f in flows_to_add],
+                          flows_to_remove=[f.id for f in flows_to_remove])
+            return
+
+        try:
+            self.flow_mgr.update_children_flows(device_rules_map)
+        except Exception as e:
+            self.log.error('Error updating children flows', error=e)
+
+        self.log.debug('logical flows update', flows_to_add=flows_to_add,
+                       flows_to_remove=flows_to_remove)
+
+        for flow in flows_to_add:
+
+            try:
+                self.flow_mgr.add_flow(flow)
+            except Exception as e:
+                self.log.error('failed to add flow', flow=flow, e=e)
+
+        for flow in flows_to_remove:
+
+            try:
+                self.flow_mgr.remove_flow(flow)
+            except Exception as e:
+                self.log.error('failed to remove flow', flow=flow, e=e)
+
+        self.flow_mgr.repush_all_different_flows()
+
+    # There has to be a better way to do this
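+    # Converts a dotted-quad address to colon-separated hex octets;
+    # e.g. (illustrative) ip_hex('10.0.0.1') returns '0a:00:00:01'.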
+    def ip_hex(self, ip):
+        octets = ip.split(".")
+        hex_ip = []
+        for octet in octets:
+            octet_hex = hex(int(octet))
+            octet_hex = octet_hex.split('0x')[1]
+            octet_hex = octet_hex.rjust(2, '0')
+            hex_ip.append(octet_hex)
+        return ":".join(hex_ip)
+
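+    # Hex-encodes the 4-byte vendor-specific part of the serial number,
+    # one nibble at a time; e.g. (illustrative) '\x12\x34\x56\x78'
+    # becomes '12345678'.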
+    def stringify_vendor_specific(self, vendor_specific):
+        return ''.join(str(i) for i in [
+            hex(ord(vendor_specific[0]) >> 4 & 0x0f)[2:],
+            hex(ord(vendor_specific[0]) & 0x0f)[2:],
+            hex(ord(vendor_specific[1]) >> 4 & 0x0f)[2:],
+            hex(ord(vendor_specific[1]) & 0x0f)[2:],
+            hex(ord(vendor_specific[2]) >> 4 & 0x0f)[2:],
+            hex(ord(vendor_specific[2]) & 0x0f)[2:],
+            hex(ord(vendor_specific[3]) >> 4 & 0x0f)[2:],
+            hex(ord(vendor_specific[3]) & 0x0f)[2:]])
+
+    def stringify_serial_number(self, serial_number):
+        return ''.join([serial_number.vendor_id,
+                        self.stringify_vendor_specific(
+                            serial_number.vendor_specific)])
+
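+    # Inverse of stringify_serial_number; e.g. (illustrative)
+    # 'BRCM12345678' yields vendor_id 'BRCM' and vendor_specific
+    # '\x12\x34\x56\x78'.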
+    def destringify_serial_number(self, serial_number_str):
+        serial_number = openolt_pb2.SerialNumber(
+            vendor_id=serial_number_str[:4].encode('utf-8'),
+            vendor_specific=binascii.unhexlify(serial_number_str[4:]))
+        return serial_number
+
+    def disable(self):
+        self.log.debug('sending-deactivate-olt-message',
+                       device_id=self.device_id)
+
+        try:
+            # Send grpc call
+            self.stub.DisableOlt(openolt_pb2.Empty())
+            # The resulting indication will bring the OLT down
+            # self.go_state_down()
+            self.log.info('openolt device disabled')
+        except Exception as e:
+            self.log.error('Failure to disable openolt device', error=e)
+
+    def delete(self):
+        self.log.info('deleting-olt', device_id=self.device_id,
+                      logical_device_id=self.logical_device_id)
+
+        # Clears up the data from the resource manager KV store
+        # for the device
+        del self.resource_mgr
+
+        try:
+            # Rebooting to reset the state
+            self.reboot()
+            # Removing logical device
+            ld = self.adapter_agent.get_logical_device(self.logical_device_id)
+            self.adapter_agent.delete_logical_device(ld)
+        except Exception as e:
+            self.log.error('Failure to delete openolt device', error=e)
+            raise
+        else:
+            self.log.info('successfully-deleted-olt', device_id=self.device_id)
+
+    def reenable(self):
+        self.log.debug('reenabling-olt', device_id=self.device_id)
+
+        try:
+            self.stub.ReenableOlt(openolt_pb2.Empty())
+
+            self.log.info('enabling-all-ports', device_id=self.device_id)
+            self.adapter_agent.enable_all_ports(self.device_id)
+        except Exception as e:
+            self.log.error('Failure to reenable openolt device', error=e)
+        else:
+            self.log.info('openolt device reenabled')
+
+    def activate_onu(self, intf_id, onu_id, serial_number,
+                     serial_number_str):
+        pir = self.bw_mgr.pir(serial_number_str)
+        self.log.debug("activating-onu", intf_id=intf_id, onu_id=onu_id,
+                       serial_number_str=serial_number_str,
+                       serial_number=serial_number, pir=pir)
+        onu = openolt_pb2.Onu(intf_id=intf_id, onu_id=onu_id,
+                              serial_number=serial_number, pir=pir)
+        self.stub.ActivateOnu(onu)
+        self.log.info('onu-activated', serial_number=serial_number_str)
+
+    def delete_child_device(self, child_device):
+        self.log.debug('sending-deactivate-onu',
+                       olt_device_id=self.device_id,
+                       onu_device=child_device,
+                       onu_serial_number=child_device.serial_number)
+        try:
+            self.adapter_agent.delete_child_device(self.device_id,
+                                                   child_device.id,
+                                                   child_device)
+        except Exception as e:
+            self.log.error('adapter_agent error', error=e)
+        try:
+            self.delete_logical_port(child_device)
+        except Exception as e:
+            self.log.error('logical_port delete error', error=e)
+        try:
+            self.delete_port(child_device.serial_number)
+        except Exception as e:
+            self.log.error('port delete error', error=e)
+        serial_number = self.destringify_serial_number(
+            child_device.serial_number)
+        # TODO FIXME - For each uni.
+        # TODO FIXME - Flows are not deleted
+        uni_id = 0  # FIXME
+        self.flow_mgr.delete_tech_profile_instance(
+            child_device.proxy_address.channel_id,
+            child_device.proxy_address.onu_id,
+            uni_id)
+        pon_intf_id_onu_id = (child_device.proxy_address.channel_id,
+                              child_device.proxy_address.onu_id,
+                              uni_id)
+        # Free any PON resources that were reserved for the ONU
+        self.resource_mgr.free_pon_resources_for_onu(pon_intf_id_onu_id)
+
+        onu = openolt_pb2.Onu(intf_id=child_device.proxy_address.channel_id,
+                              onu_id=child_device.proxy_address.onu_id,
+                              serial_number=serial_number)
+        self.stub.DeleteOnu(onu)
+
+    def reboot(self):
+        self.log.debug('rebooting openolt device', device_id=self.device_id)
+        try:
+            self.stub.Reboot(openolt_pb2.Empty())
+        except Exception as e:
+            self.log.error('something went wrong with the reboot', error=e)
+        else:
+            self.log.info('device rebooted')
+
+    def trigger_statistics_collection(self):
+        try:
+            self.stub.CollectStatistics(openolt_pb2.Empty())
+        except Exception as e:
+            self.log.error('Error while triggering statistics collection',
+                           error=e)
+        else:
+            self.log.info('statistics requested')
+
+    def simulate_alarm(self, alarm):
+        self.alarm_mgr.simulate_alarm(alarm)
diff --git a/python/adapters/openolt/openolt_flow_mgr.py b/python/adapters/openolt/openolt_flow_mgr.py
new file mode 100644
index 0000000..e298db2
--- /dev/null
+++ b/python/adapters/openolt/openolt_flow_mgr.py
@@ -0,0 +1,1062 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import copy
+from twisted.internet import reactor
+import grpc
+from google.protobuf.json_format import MessageToDict
+import hashlib
+from simplejson import dumps
+
+from voltha.protos.openflow_13_pb2 import OFPXMC_OPENFLOW_BASIC, \
+    ofp_flow_stats, OFPMT_OXM, Flows, FlowGroups, OFPXMT_OFB_IN_PORT, \
+    OFPXMT_OFB_VLAN_VID
+from voltha.protos.device_pb2 import Port
+import voltha.core.flow_decomposer as fd
+from voltha.adapters.openolt.protos import openolt_pb2
+from voltha.registry import registry
+
+from common.tech_profile.tech_profile import DEFAULT_TECH_PROFILE_TABLE_ID
+
+# Flow categories
+HSIA_FLOW = "HSIA_FLOW"
+
+EAP_ETH_TYPE = 0x888e
+LLDP_ETH_TYPE = 0x88cc
+
+IGMP_PROTO = 2
+
+# FIXME - see also BRDCM_DEFAULT_VLAN in broadcom_onu.py
+DEFAULT_MGMT_VLAN = 4091
+
+# Openolt Flow
+UPSTREAM = "upstream"
+DOWNSTREAM = "downstream"
+PACKET_TAG_TYPE = "pkt_tag_type"
+UNTAGGED = "untagged"
+SINGLE_TAG = "single_tag"
+DOUBLE_TAG = "double_tag"
+
+# Classifier
+ETH_TYPE = 'eth_type'
+TPID = 'tpid'
+IP_PROTO = 'ip_proto'
+IN_PORT = 'in_port'
+VLAN_VID = 'vlan_vid'
+VLAN_PCP = 'vlan_pcp'
+UDP_DST = 'udp_dst'
+UDP_SRC = 'udp_src'
+IPV4_DST = 'ipv4_dst'
+IPV4_SRC = 'ipv4_src'
+METADATA = 'metadata'
+OUTPUT = 'output'
+# Action
+POP_VLAN = 'pop_vlan'
+PUSH_VLAN = 'push_vlan'
+TRAP_TO_HOST = 'trap_to_host'
+
+
+class OpenOltFlowMgr(object):
+
+    def __init__(self, adapter_agent, log, stub, device_id, logical_device_id,
+                 platform, resource_mgr):
+        self.adapter_agent = adapter_agent
+        self.log = log
+        self.stub = stub
+        self.device_id = device_id
+        self.logical_device_id = logical_device_id
+        self.nni_intf_id = None
+        self.platform = platform
+        self.logical_flows_proxy = registry('core').get_proxy(
+            '/logical_devices/{}/flows'.format(self.logical_device_id))
+        self.flows_proxy = registry('core').get_proxy(
+            '/devices/{}/flows'.format(self.device_id))
+        self.root_proxy = registry('core').get_proxy('/')
+        self.resource_mgr = resource_mgr
+        self.tech_profile = dict()
+        self._populate_tech_profile_per_pon_port()
+        self.retry_add_flow_list = []
+
+    def add_flow(self, flow):
+        self.log.debug('add flow', flow=flow)
+        classifier_info = dict()
+        action_info = dict()
+
+        for field in fd.get_ofb_fields(flow):
+            if field.type == fd.ETH_TYPE:
+                classifier_info[ETH_TYPE] = field.eth_type
+                self.log.debug('field-type-eth-type',
+                               eth_type=classifier_info[ETH_TYPE])
+            elif field.type == fd.IP_PROTO:
+                classifier_info[IP_PROTO] = field.ip_proto
+                self.log.debug('field-type-ip-proto',
+                               ip_proto=classifier_info[IP_PROTO])
+            elif field.type == fd.IN_PORT:
+                classifier_info[IN_PORT] = field.port
+                self.log.debug('field-type-in-port',
+                               in_port=classifier_info[IN_PORT])
+            elif field.type == fd.VLAN_VID:
+                classifier_info[VLAN_VID] = field.vlan_vid & 0xfff
+                self.log.debug('field-type-vlan-vid',
+                               vlan=classifier_info[VLAN_VID])
+            elif field.type == fd.VLAN_PCP:
+                classifier_info[VLAN_PCP] = field.vlan_pcp
+                self.log.debug('field-type-vlan-pcp',
+                               pcp=classifier_info[VLAN_PCP])
+            elif field.type == fd.UDP_DST:
+                classifier_info[UDP_DST] = field.udp_dst
+                self.log.debug('field-type-udp-dst',
+                               udp_dst=classifier_info[UDP_DST])
+            elif field.type == fd.UDP_SRC:
+                classifier_info[UDP_SRC] = field.udp_src
+                self.log.debug('field-type-udp-src',
+                               udp_src=classifier_info[UDP_SRC])
+            elif field.type == fd.IPV4_DST:
+                classifier_info[IPV4_DST] = field.ipv4_dst
+                self.log.debug('field-type-ipv4-dst',
+                               ipv4_dst=classifier_info[IPV4_DST])
+            elif field.type == fd.IPV4_SRC:
+                classifier_info[IPV4_SRC] = field.ipv4_src
+                self.log.debug('field-type-ipv4-src',
+                               ipv4_src=classifier_info[IPV4_SRC])
+            elif field.type == fd.METADATA:
+                classifier_info[METADATA] = field.table_metadata
+                self.log.debug('field-type-metadata',
+                               metadata=classifier_info[METADATA])
+            else:
+                raise NotImplementedError('field.type={}'.format(
+                    field.type))
+
+        for action in fd.get_actions(flow):
+            if action.type == fd.OUTPUT:
+                action_info[OUTPUT] = action.output.port
+                self.log.debug('action-type-output',
+                               output=action_info[OUTPUT],
+                               in_port=classifier_info[IN_PORT])
+            elif action.type == fd.POP_VLAN:
+                if fd.get_goto_table_id(flow) is None:
+                    self.log.debug('being taken care of by ONU', flow=flow)
+                    return
+                action_info[POP_VLAN] = True
+                self.log.debug('action-type-pop-vlan',
+                               in_port=classifier_info[IN_PORT])
+            elif action.type == fd.PUSH_VLAN:
+                action_info[PUSH_VLAN] = True
+                action_info[TPID] = action.push.ethertype
+                self.log.debug('action-type-push-vlan',
+                               push_tpid=action_info[TPID], in_port=classifier_info[IN_PORT])
+                if action.push.ethertype != 0x8100:
+                    self.log.error('unhandled-tpid',
+                                   ethertype=action.push.ethertype)
+            elif action.type == fd.SET_FIELD:
+                # action_info['action_type'] = 'set_field'
+                _field = action.set_field.field.ofb_field
+                assert (action.set_field.field.oxm_class ==
+                        OFPXMC_OPENFLOW_BASIC)
+                self.log.debug('action-type-set-field',
+                               field=_field, in_port=classifier_info[IN_PORT])
+                if _field.type == fd.VLAN_VID:
+                    self.log.debug('set-field-type-vlan-vid',
+                                   vlan_vid=_field.vlan_vid & 0xfff)
+                    action_info[VLAN_VID] = (_field.vlan_vid & 0xfff)
+                else:
+                    self.log.error('unsupported-action-set-field-type',
+                                   field_type=_field.type)
+            else:
+                self.log.error('unsupported-action-type',
+                               action_type=action.type, in_port=classifier_info[IN_PORT])
+
+        if fd.get_goto_table_id(flow) is not None and POP_VLAN not in action_info:
+            self.log.debug('being taken care of by ONU', flow=flow)
+            return
+
+        if OUTPUT not in action_info and METADATA in classifier_info:
+            # find flow in the next table
+            next_flow = self.find_next_flow(flow)
+            if next_flow is None:
+                return
+            action_info[OUTPUT] = fd.get_out_port(next_flow)
+            for field in fd.get_ofb_fields(next_flow):
+                if field.type == fd.VLAN_VID:
+                    classifier_info[METADATA] = field.vlan_vid & 0xfff
+
+        self.log.debug('flow-ports',
+                       classifier_inport=classifier_info[IN_PORT],
+                       action_output=action_info[OUTPUT])
+        (port_no, intf_id, onu_id, uni_id) = self.platform.extract_access_from_flow(
+            classifier_info[IN_PORT], action_info[OUTPUT])
+
+        self.divide_and_add_flow(intf_id, onu_id, uni_id, port_no, classifier_info,
+                                 action_info, flow)
+
+    def _is_uni_port(self, port_no):
+        try:
+            port = self.adapter_agent.get_logical_port(self.logical_device_id,
+                                                       'uni-{}'.format(port_no))
+            if port is not None:
+                return (not port.root_port), port.device_id
+            else:
+                return False, None
+        except Exception as e:
+            self.log.error("error-retrieving-port", e=e)
+            return False, None
+
+    def _clear_flow_id_from_rm(self, flow, flow_id, flow_direction):
+        uni_port_no = None
+        child_device_id = None
+        if flow_direction == UPSTREAM:
+            for field in fd.get_ofb_fields(flow):
+                if field.type == fd.IN_PORT:
+                    is_uni, child_device_id = self._is_uni_port(field.port)
+                    if is_uni:
+                        uni_port_no = field.port
+        elif flow_direction == DOWNSTREAM:
+            for field in fd.get_ofb_fields(flow):
+                if field.type == fd.METADATA:
+                    # The lower 32 bits of the metadata carry the UNI
+                    # port number (see find_next_flow).
+                    uni_port = field.table_metadata & 0xFFFFFFFF
+                    is_uni, child_device_id = self._is_uni_port(uni_port)
+                    if is_uni:
+                        uni_port_no = uni_port
+
+            if uni_port_no is None:
+                for action in fd.get_actions(flow):
+                    if action.type == fd.OUTPUT:
+                        is_uni, child_device_id = \
+                            self._is_uni_port(action.output.port)
+                        if is_uni:
+                            uni_port_no = action.output.port
+
+        if child_device_id:
+            child_device = self.adapter_agent.get_device(child_device_id)
+            pon_intf = child_device.proxy_address.channel_id
+            onu_id = child_device.proxy_address.onu_id
+            uni_id = self.platform.uni_id_from_port_num(uni_port_no) if uni_port_no is not None else None
+            flows = self.resource_mgr.get_flow_id_info(pon_intf, onu_id, uni_id, flow_id)
+            assert (isinstance(flows, list))
+            self.log.debug("retrieved-flows", flows=flows)
+            for idx in range(len(flows)):
+                if flow_direction == flows[idx]['flow_type']:
+                    flows.pop(idx)
+                    self.update_flow_info_to_kv_store(pon_intf, onu_id, uni_id, flow_id, flows)
+                    if len(flows) > 0:
+                        # There are still flows referencing the same flow_id.
+                        # So the flow should not be freed yet.
+                        # For ex: Case of HSIA where same flow is shared
+                        # between DS and US.
+                        return
+
+            self.resource_mgr.free_flow_id_for_uni(pon_intf, onu_id, uni_id, flow_id)
+        else:
+            self.log.error("invalid-info", uni_port_no=uni_port_no,
+                           child_device_id=child_device_id)
+
+    def retry_add_flow(self, flow):
+        self.log.debug("retry-add-flow")
+        if flow.id in self.retry_add_flow_list:
+            self.retry_add_flow_list.remove(flow.id)
+        self.add_flow(flow)
+
+    def remove_flow(self, flow):
+        self.log.debug('trying to remove flows for logical flow',
+                       logical_flow=flow)
+        device_flows_to_remove = []
+        device_flows = self.flows_proxy.get('/').items
+        for f in device_flows:
+            if f.cookie == flow.id:
+                device_flows_to_remove.append(f)
+
+        for f in device_flows_to_remove:
+            (id, direction) = self.decode_stored_id(f.id)
+            flow_to_remove = openolt_pb2.Flow(flow_id=id, flow_type=direction)
+            try:
+                self.stub.FlowRemove(flow_to_remove)
+            except grpc.RpcError as grpc_e:
+                if grpc_e.code() == grpc.StatusCode.NOT_FOUND:
+                    self.log.debug('This flow does not exist on the switch, '
+                                   'normal after an OLT reboot',
+                                   flow=flow_to_remove)
+                else:
+                    raise grpc_e
+
+            # once we have successfully deleted the flow on the device
+            # release the flow_id on resource pool and also clear any
+            # data associated with the flow_id on KV store.
+            self._clear_flow_id_from_rm(f, id, direction)
+            self.log.debug('flow removed from device', flow=f,
+                           flow_key=flow_to_remove)
+
+        if len(device_flows_to_remove) > 0:
+            new_flows = []
+            flows_ids_to_remove = [f.id for f in device_flows_to_remove]
+            for f in device_flows:
+                if f.id not in flows_ids_to_remove:
+                    new_flows.append(f)
+
+            self.flows_proxy.update('/', Flows(items=new_flows))
+            self.log.debug('flows removed from the data store',
+                           flow_ids_removed=flows_ids_to_remove,
+                           number_of_flows_removed=(len(device_flows) -
+                                                    len(new_flows)),
+                           expected_flows_removed=len(device_flows_to_remove))
+        else:
+            self.log.debug('no device flow to remove for this flow (normal '
+                           'for multi table flows)', flow=flow)
+
+    def _get_ofp_port_name(self, intf_id, onu_id, uni_id):
+        parent_port_no = self.platform.intf_id_to_port_no(intf_id, Port.PON_OLT)
+        child_device = self.adapter_agent.get_child_device(self.device_id,
+                                                           parent_port_no=parent_port_no, onu_id=onu_id)
+        if child_device is None:
+            self.log.error("could-not-find-child-device",
+                           parent_port_no=intf_id, onu_id=onu_id)
+            return (None, None)
+        ports = self.adapter_agent.get_ports(child_device.id, Port.ETHERNET_UNI)
+        logical_port = self.adapter_agent.get_logical_port(
+            self.logical_device_id, ports[uni_id].label)
+        return (logical_port.ofp_port.name, logical_port.ofp_port.port_no)
+
+    def get_tp_path(self, intf_id, ofp_port_name):
+        # FIXME Should get the table id from the flow; as of now it is
+        # hardcoded to DEFAULT_TECH_PROFILE_TABLE_ID (64)
+        # 'tp_path' contains the suffix part of the tech_profile_instance path.
+        # The prefix to the 'tp_path' should be set to \
+        # TechProfile.KV_STORE_TECH_PROFILE_PATH_PREFIX by the ONU adapter.
+        return self.tech_profile[intf_id]. \
+            get_tp_path(DEFAULT_TECH_PROFILE_TABLE_ID,
+                        ofp_port_name)
+
+    def delete_tech_profile_instance(self, intf_id, onu_id, uni_id):
+        # Remove the TP instance associated with the ONU
+        (ofp_port_name, ofp_port_no) = self._get_ofp_port_name(
+            intf_id, onu_id, uni_id)
+        tp_path = self.get_tp_path(intf_id, ofp_port_name)
+        return self.tech_profile[intf_id].delete_tech_profile_instance(tp_path)
+
+    def divide_and_add_flow(self, intf_id, onu_id, uni_id, port_no, classifier,
+                            action, flow):
+
+        self.log.debug('sorting flow', intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, port_no=port_no,
+                       classifier=classifier, action=action)
+
+        alloc_id, gem_ports = self.create_tcont_gemport(intf_id, onu_id, uni_id,
+                                                        flow.table_id)
+        if alloc_id is None or gem_ports is None:
+            self.log.error("alloc-id-gem-ports-unavailable", alloc_id=alloc_id,
+                           gem_ports=gem_ports)
+            return
+
+        self.log.debug('Generated required alloc and gemport ids',
+                       alloc_id=alloc_id, gemports=gem_ports)
+
+        # Flows can't be added for a specific gemport unless p-bits are
+        # received. Hence add flows for all gemports.
+        for gemport_id in gem_ports:
+            if IP_PROTO in classifier:
+                if classifier[IP_PROTO] == 17:
+                    self.log.debug('dhcp flow add')
+                    self.add_dhcp_trap(intf_id, onu_id, uni_id, port_no, classifier,
+                                       action, flow, alloc_id, gemport_id)
+                elif classifier[IP_PROTO] == 2:
+                    self.log.warn('igmp flow add ignored, not implemented yet')
+                else:
+                    self.log.warn("Invalid-Classifier-to-handle",
+                                  classifier=classifier,
+                                  action=action)
+            elif ETH_TYPE in classifier:
+                if classifier[ETH_TYPE] == EAP_ETH_TYPE:
+                    self.log.debug('eapol flow add')
+                    self.add_eapol_flow(intf_id, onu_id, uni_id, port_no, flow, alloc_id,
+                                        gemport_id)
+                    vlan_id = self.get_subscriber_vlan(fd.get_in_port(flow))
+                    if vlan_id is not None:
+                        self.add_eapol_flow(
+                            intf_id, onu_id, uni_id, port_no, flow, alloc_id, gemport_id,
+                            vlan_id=vlan_id)
+                    parent_port_no = self.platform.intf_id_to_port_no(intf_id, Port.PON_OLT)
+                    onu_device = self.adapter_agent.get_child_device(self.device_id,
+                                                                     onu_id=onu_id,
+                                                                     parent_port_no=parent_port_no)
+                    (ofp_port_name, ofp_port_no) = self._get_ofp_port_name(intf_id, onu_id, uni_id)
+                    if ofp_port_name is None:
+                        self.log.error("port-name-not-found")
+                        return
+
+                    tp_path = self.get_tp_path(intf_id, ofp_port_name)
+
+                    self.log.debug('Load-tech-profile-request-to-brcm-handler',
+                                   tp_path=tp_path)
+                    msg = {'proxy_address': onu_device.proxy_address, 'uni_id': uni_id,
+                           'event': 'download_tech_profile', 'event_data': tp_path}
+
+                    # Send the event message to the ONU adapter
+                    self.adapter_agent.publish_inter_adapter_message(onu_device.id,
+                                                                     msg)
+
+                elif classifier[ETH_TYPE] == LLDP_ETH_TYPE:
+                    self.log.debug('lldp flow add')
+                    nni_intf_id = self.get_nni_intf_id()
+                    self.add_lldp_flow(flow, port_no, nni_intf_id)
+
+            elif PUSH_VLAN in action:
+                self.add_upstream_data_flow(intf_id, onu_id, uni_id, port_no, classifier,
+                                            action, flow, alloc_id, gemport_id)
+            elif POP_VLAN in action:
+                self.add_downstream_data_flow(intf_id, onu_id, uni_id, port_no, classifier,
+                                              action, flow, alloc_id, gemport_id)
+            else:
+                self.log.debug('Invalid-flow-type-to-handle',
+                               classifier=classifier,
+                               action=action, flow=flow)
+
+    def create_tcont_gemport(self, intf_id, onu_id, uni_id, table_id):
+        alloc_id, gem_port_ids = None, None
+        pon_intf_onu_id = (intf_id, onu_id)
+
+        # If we have already allocated an alloc_id and gem_ports for this
+        # ONU earlier, reuse them
+        alloc_id = \
+            self.resource_mgr.get_current_alloc_ids_for_onu(pon_intf_onu_id)
+        gem_port_ids = \
+            self.resource_mgr.get_current_gemport_ids_for_onu(pon_intf_onu_id)
+        if alloc_id is not None and gem_port_ids is not None:
+            return alloc_id, gem_port_ids
+
+        try:
+            (ofp_port_name, ofp_port_no) = self._get_ofp_port_name(intf_id, onu_id, uni_id)
+            if ofp_port_name is None:
+                self.log.error("port-name-not-found")
+                return alloc_id, gem_port_ids
+            # FIXME: If the table id is below 64, fall back to
+            # DEFAULT_TECH_PROFILE_TABLE_ID (64)
+            if table_id < DEFAULT_TECH_PROFILE_TABLE_ID:
+                table_id = DEFAULT_TECH_PROFILE_TABLE_ID
+
+            # Check tech profile instance already exists for derived port name
+            tech_profile_instance = self.tech_profile[intf_id]. \
+                get_tech_profile_instance(table_id, ofp_port_name)
+            self.log.debug('Get-tech-profile-instance-status', tech_profile_instance=tech_profile_instance)
+
+            if tech_profile_instance is None:
+                # create tech profile instance
+                tech_profile_instance = self.tech_profile[intf_id]. \
+                    create_tech_profile_instance(table_id, ofp_port_name,
+                                                 intf_id)
+                if tech_profile_instance is None:
+                    raise Exception('Tech-profile-instance-creation-failed')
+            else:
+                self.log.debug(
+                    'Tech-profile-instance-already-exist-for-given port-name',
+                    ofp_port_name=ofp_port_name)
+
+            # upstream scheduler
+            us_scheduler = self.tech_profile[intf_id].get_us_scheduler(
+                tech_profile_instance)
+            # downstream scheduler
+            ds_scheduler = self.tech_profile[intf_id].get_ds_scheduler(
+                tech_profile_instance)
+            # create Tcont
+            tconts = self.tech_profile[intf_id].get_tconts(tech_profile_instance,
+                                                           us_scheduler,
+                                                           ds_scheduler)
+
+            self.stub.CreateTconts(openolt_pb2.Tconts(intf_id=intf_id,
+                                                      onu_id=onu_id,
+                                                      uni_id=uni_id,
+                                                      port_no=ofp_port_no,
+                                                      tconts=tconts))
+
+            # Fetch alloc id and gemports from tech profile instance
+            alloc_id = tech_profile_instance.us_scheduler.alloc_id
+            gem_port_ids = []
+            for i in range(len(
+                    tech_profile_instance.upstream_gem_port_attribute_list)):
+                gem_port_ids.append(
+                    tech_profile_instance.upstream_gem_port_attribute_list[i].
+                    gemport_id)
+        except BaseException as e:
+            self.log.exception('tcont-gemport-create-failed', e=e)
+
+        # Update the allocated alloc_id and gem_port_id for the ONU/UNI to KV store
+        pon_intf_onu_id = (intf_id, onu_id, uni_id)
+        self.resource_mgr.resource_mgrs[intf_id].update_alloc_ids_for_onu(
+            pon_intf_onu_id,
+            list([alloc_id])
+        )
+        self.resource_mgr.resource_mgrs[intf_id].update_gemport_ids_for_onu(
+            pon_intf_onu_id,
+            gem_port_ids
+        )
+
+        self.resource_mgr.update_gemports_ponport_to_onu_map_on_kv_store(
+            gem_port_ids, intf_id, onu_id, uni_id
+        )
+
+        return alloc_id, gem_port_ids
+
+    def add_upstream_data_flow(self, intf_id, onu_id, uni_id, port_no, uplink_classifier,
+                               uplink_action, logical_flow, alloc_id,
+                               gemport_id):
+
+        uplink_classifier[PACKET_TAG_TYPE] = SINGLE_TAG
+
+        self.add_hsia_flow(intf_id, onu_id, uni_id, port_no, uplink_classifier,
+                           uplink_action, UPSTREAM,
+                           logical_flow, alloc_id, gemport_id)
+
+        # Secondary EAP on the subscriber vlan
+        (eap_active, eap_logical_flow) = self.is_eap_enabled(intf_id, onu_id, uni_id)
+        if eap_active:
+            self.add_eapol_flow(intf_id, onu_id, uni_id, port_no, eap_logical_flow, alloc_id,
+                                gemport_id, vlan_id=uplink_classifier[VLAN_VID])
+
+    def add_downstream_data_flow(self, intf_id, onu_id, uni_id, port_no, downlink_classifier,
+                                 downlink_action, flow, alloc_id, gemport_id):
+        downlink_classifier[PACKET_TAG_TYPE] = DOUBLE_TAG
+        # Needed ???? It should be already there
+        downlink_action[POP_VLAN] = True
+        downlink_action[VLAN_VID] = downlink_classifier[VLAN_VID]
+
+        self.add_hsia_flow(intf_id, onu_id, uni_id, port_no, downlink_classifier,
+                           downlink_action, DOWNSTREAM,
+                           flow, alloc_id, gemport_id)
+
+    def add_hsia_flow(self, intf_id, onu_id, uni_id, port_no, classifier, action,
+                      direction, logical_flow, alloc_id, gemport_id):
+
+        flow_store_cookie = self._get_flow_store_cookie(classifier,
+                                                        gemport_id)
+
+        # One of the OLT platforms (Broadcom BAL) requires symmetric
+        # flows to use the same flow_id across the upstream and downstream
+        # directions. Since the HSIA flow is currently the only symmetric
+        # flow, we re-use its flow_id in both directions. The
+        # 'flow_category' takes priority over flow_cookie when looking up
+        # an available HSIA_FLOW id for the ONU.
+        flow_id = self.resource_mgr.get_flow_id(intf_id, onu_id, uni_id,
+                                                flow_store_cookie,
+                                                HSIA_FLOW)
+        if flow_id is None:
+            self.log.error("hsia-flow-unavailable")
+            return
+        flow = openolt_pb2.Flow(
+            access_intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, flow_id=flow_id,
+            flow_type=direction, alloc_id=alloc_id, network_intf_id=self.get_nni_intf_id(),
+            gemport_id=gemport_id,
+            classifier=self.mk_classifier(classifier),
+            action=self.mk_action(action),
+            priority=logical_flow.priority,
+            port_no=port_no,
+            cookie=logical_flow.cookie)
+
+        if self.add_flow_to_device(flow, logical_flow):
+            flow_info = self._get_flow_info_as_json_blob(flow,
+                                                         flow_store_cookie,
+                                                         HSIA_FLOW)
+            self.update_flow_info_to_kv_store(flow.access_intf_id,
+                                              flow.onu_id, flow.uni_id,
+                                              flow.flow_id, flow_info)
+
+    def add_dhcp_trap(self, intf_id, onu_id, uni_id, port_no, classifier, action, logical_flow,
+                      alloc_id, gemport_id):
+
+        self.log.debug('add dhcp upstream trap', classifier=classifier,
+                       intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, action=action)
+
+        action.clear()
+        action[TRAP_TO_HOST] = True
+        classifier[UDP_SRC] = 68
+        classifier[UDP_DST] = 67
+        classifier[PACKET_TAG_TYPE] = SINGLE_TAG
+        classifier.pop(VLAN_VID, None)
+
+        flow_store_cookie = self._get_flow_store_cookie(classifier,
+                                                        gemport_id)
+
+        flow_id = self.resource_mgr.get_flow_id(
+            intf_id, onu_id, uni_id, flow_store_cookie
+        )
+        dhcp_flow = openolt_pb2.Flow(
+            onu_id=onu_id, uni_id=uni_id, flow_id=flow_id, flow_type=UPSTREAM,
+            access_intf_id=intf_id, gemport_id=gemport_id,
+            alloc_id=alloc_id, network_intf_id=self.get_nni_intf_id(),
+            priority=logical_flow.priority,
+            classifier=self.mk_classifier(classifier),
+            action=self.mk_action(action),
+            port_no=port_no,
+            cookie=logical_flow.cookie)
+
+        if self.add_flow_to_device(dhcp_flow, logical_flow):
+            flow_info = self._get_flow_info_as_json_blob(dhcp_flow, flow_store_cookie)
+            self.update_flow_info_to_kv_store(dhcp_flow.access_intf_id,
+                                              dhcp_flow.onu_id,
+                                              dhcp_flow.uni_id,
+                                              dhcp_flow.flow_id,
+                                              flow_info)
+
+    def add_eapol_flow(self, intf_id, onu_id, uni_id, port_no, logical_flow, alloc_id,
+                       gemport_id, vlan_id=DEFAULT_MGMT_VLAN):
+
+        uplink_classifier = dict()
+        uplink_classifier[ETH_TYPE] = EAP_ETH_TYPE
+        uplink_classifier[PACKET_TAG_TYPE] = SINGLE_TAG
+        uplink_classifier[VLAN_VID] = vlan_id
+
+        uplink_action = dict()
+        uplink_action[TRAP_TO_HOST] = True
+
+        flow_store_cookie = self._get_flow_store_cookie(uplink_classifier,
+                                                        gemport_id)
+        # Add Upstream EAPOL Flow.
+        uplink_flow_id = self.resource_mgr.get_flow_id(
+            intf_id, onu_id, uni_id, flow_store_cookie
+        )
+
+        upstream_flow = openolt_pb2.Flow(
+            access_intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, flow_id=uplink_flow_id,
+            flow_type=UPSTREAM, alloc_id=alloc_id, network_intf_id=self.get_nni_intf_id(),
+            gemport_id=gemport_id,
+            classifier=self.mk_classifier(uplink_classifier),
+            action=self.mk_action(uplink_action),
+            priority=logical_flow.priority,
+            port_no=port_no,
+            cookie=logical_flow.cookie)
+
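+        # 0x1000 is the OpenFlow OFPVID_PRESENT bit; OR-ing it into the
+        # VID marks the match as tagged, e.g. (illustrative) vlan 4091
+        # (0xffb) becomes 0x1ffb.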
+        logical_flow = copy.deepcopy(logical_flow)
+        logical_flow.match.oxm_fields.extend(fd.mk_oxm_fields([fd.vlan_vid(
+            vlan_id | 0x1000)]))
+        logical_flow.match.type = OFPMT_OXM
+
+        if self.add_flow_to_device(upstream_flow, logical_flow):
+            flow_info = self._get_flow_info_as_json_blob(upstream_flow,
+                                                         flow_store_cookie)
+            self.update_flow_info_to_kv_store(upstream_flow.access_intf_id,
+                                              upstream_flow.onu_id,
+                                              upstream_flow.uni_id,
+                                              upstream_flow.flow_id,
+                                              flow_info)
+
+        if vlan_id == DEFAULT_MGMT_VLAN:
+            # Add Downstream EAPOL Flow, Only for first EAP flow (BAL
+            # requirement)
+            # On one of the platforms (Broadcom BAL), when same DL classifier
+            # vlan was used across multiple ONUs, eapol flow re-adds after
+            # flow delete (cases of onu reboot/disable) fails.
+            # In order to generate unique vlan, a combination of intf_id
+            # onu_id and uni_id is used.
+            # uni_id defaults to 0, so add 1 to it.
+            special_vlan_downstream_flow = 4090 - intf_id * onu_id * (uni_id+1)
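+            # e.g. (illustrative) intf_id=1, onu_id=2, uni_id=0 gives
+            # 4090 - 1*2*1 = 4088.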
+            # Assert that we never generate an invalid vlan
+            assert special_vlan_downstream_flow >= 2, 'invalid-vlan-generated'
+
+            downlink_classifier = dict()
+            downlink_classifier[PACKET_TAG_TYPE] = SINGLE_TAG
+            downlink_classifier[VLAN_VID] = special_vlan_downstream_flow
+
+            downlink_action = dict()
+            downlink_action[PUSH_VLAN] = True
+            downlink_action[VLAN_VID] = vlan_id
+
+            flow_store_cookie = self._get_flow_store_cookie(downlink_classifier,
+                                                            gemport_id)
+
+            downlink_flow_id = self.resource_mgr.get_flow_id(
+                intf_id, onu_id, uni_id, flow_store_cookie
+            )
+
+            downstream_flow = openolt_pb2.Flow(
+                access_intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, flow_id=downlink_flow_id,
+                flow_type=DOWNSTREAM, alloc_id=alloc_id, network_intf_id=self.get_nni_intf_id(),
+                gemport_id=gemport_id,
+                classifier=self.mk_classifier(downlink_classifier),
+                action=self.mk_action(downlink_action),
+                priority=logical_flow.priority,
+                port_no=port_no,
+                cookie=logical_flow.cookie)
+
+            downstream_logical_flow = ofp_flow_stats(
+                id=logical_flow.id, cookie=logical_flow.cookie,
+                table_id=logical_flow.table_id, priority=logical_flow.priority,
+                flags=logical_flow.flags)
+
+            downstream_logical_flow.match.oxm_fields.extend(fd.mk_oxm_fields([
+                fd.in_port(fd.get_out_port(logical_flow)),
+                fd.vlan_vid(special_vlan_downstream_flow | 0x1000)]))
+            downstream_logical_flow.match.type = OFPMT_OXM
+
+            downstream_logical_flow.instructions.extend(
+                fd.mk_instructions_from_actions([fd.output(
+                    self.platform.mk_uni_port_num(intf_id, onu_id, uni_id))]))
+
+            if self.add_flow_to_device(downstream_flow, downstream_logical_flow):
+                flow_info = self._get_flow_info_as_json_blob(downstream_flow,
+                                                             flow_store_cookie)
+                self.update_flow_info_to_kv_store(downstream_flow.access_intf_id,
+                                                  downstream_flow.onu_id,
+                                                  downstream_flow.uni_id,
+                                                  downstream_flow.flow_id,
+                                                  flow_info)
+
+    def repush_all_different_flows(self):
+        # Check if the device is supposed to have flows, if so add them
+        # Recover static flows after a reboot
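+        # Device flows record the originating logical flow id in their
+        # cookie (see register_flow), so comparing cookies tells us which
+        # logical flows are already provisioned on the device.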
+        logical_flows = self.logical_flows_proxy.get('/').items
+        devices_flows = self.flows_proxy.get('/').items
+        logical_flows_ids_provisioned = [f.cookie for f in devices_flows]
+        for logical_flow in logical_flows:
+            try:
+                if logical_flow.id not in logical_flows_ids_provisioned:
+                    self.add_flow(logical_flow)
+            except Exception as e:
+                self.log.exception('Problem reading this flow', e=e)
+
+    def reset_flows(self):
+        self.flows_proxy.update('/', Flows())
+
+    """ Add a downstream LLDP trap flow on the NNI interface
+    """
+
+    def add_lldp_flow(self, logical_flow, port_no, network_intf_id=0):
+
+        classifier = dict()
+        classifier[ETH_TYPE] = LLDP_ETH_TYPE
+        classifier[PACKET_TAG_TYPE] = UNTAGGED
+        action = dict()
+        action[TRAP_TO_HOST] = True
+
+        # LLDP flow is installed to trap LLDP packets on the NNI port.
+        # We manage flow_id resource pool on per PON port basis.
+        # Since this situation is tricky, as a hack, we pass the NNI port
+        # index (network_intf_id) as PON port Index for the flow_id resource
+        # pool. Also, there is no ONU Id available for trapping LLDP packets
+        # on NNI port, use onu_id as -1 (invalid)
+        # ****************** CAVEAT *******************
+        # This logic works if the NNI Port Id falls within the same valid
+        # range of PON Port Ids. If this doesn't work for some OLT Vendor
+        # we need to have a re-look at this.
+        # *********************************************
+        onu_id = -1
+        uni_id = -1
+        flow_store_cookie = self._get_flow_store_cookie(classifier)
+        flow_id = self.resource_mgr.get_flow_id(network_intf_id, onu_id, uni_id,
+                                                flow_store_cookie)
+
+        downstream_flow = openolt_pb2.Flow(
+            access_intf_id=-1,  # access_intf_id not required
+            onu_id=onu_id, # onu_id not required
+            uni_id=uni_id, # uni_id not used
+            flow_id=flow_id,
+            flow_type=DOWNSTREAM,
+            network_intf_id=network_intf_id,
+            gemport_id=-1,  # gemport_id not required
+            classifier=self.mk_classifier(classifier),
+            action=self.mk_action(action),
+            priority=logical_flow.priority,
+            port_no=port_no,
+            cookie=logical_flow.cookie)
+
+        self.log.debug('add lldp downstream trap', classifier=classifier,
+                       action=action, flow=downstream_flow, port_no=port_no)
+        if self.add_flow_to_device(downstream_flow, logical_flow):
+            self.update_flow_info_to_kv_store(network_intf_id, onu_id, uni_id,
+                                              flow_id, downstream_flow)
+
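+    # Maps the decomposed OpenFlow classifier dict onto the openolt
+    # Classifier protobuf; e.g. (illustrative) {'eth_type': 0x888e,
+    # 'vlan_vid': 4091, 'pkt_tag_type': 'single_tag'} sets eth_type,
+    # o_vid and pkt_tag_type on the message.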
+    def mk_classifier(self, classifier_info):
+
+        classifier = openolt_pb2.Classifier()
+
+        if ETH_TYPE in classifier_info:
+            classifier.eth_type = classifier_info[ETH_TYPE]
+        if IP_PROTO in classifier_info:
+            classifier.ip_proto = classifier_info[IP_PROTO]
+        if VLAN_VID in classifier_info:
+            classifier.o_vid = classifier_info[VLAN_VID]
+        if METADATA in classifier_info:
+            classifier.i_vid = classifier_info[METADATA]
+        if VLAN_PCP in classifier_info:
+            classifier.o_pbits = classifier_info[VLAN_PCP]
+        if UDP_SRC in classifier_info:
+            classifier.src_port = classifier_info[UDP_SRC]
+        if UDP_DST in classifier_info:
+            classifier.dst_port = classifier_info[UDP_DST]
+        if IPV4_DST in classifier_info:
+            classifier.dst_ip = classifier_info[IPV4_DST]
+        if IPV4_SRC in classifier_info:
+            classifier.src_ip = classifier_info[IPV4_SRC]
+        if PACKET_TAG_TYPE in classifier_info:
+            if classifier_info[PACKET_TAG_TYPE] == SINGLE_TAG:
+                classifier.pkt_tag_type = SINGLE_TAG
+            elif classifier_info[PACKET_TAG_TYPE] == DOUBLE_TAG:
+                classifier.pkt_tag_type = DOUBLE_TAG
+            elif classifier_info[PACKET_TAG_TYPE] == UNTAGGED:
+                classifier.pkt_tag_type = UNTAGGED
+            else:
+                classifier.pkt_tag_type = 'none'
+
+        return classifier
+
+    def mk_action(self, action_info):
+        action = openolt_pb2.Action()
+
+        if POP_VLAN in action_info:
+            action.o_vid = action_info[VLAN_VID]
+            action.cmd.remove_outer_tag = True
+        elif PUSH_VLAN in action_info:
+            action.o_vid = action_info[VLAN_VID]
+            action.cmd.add_outer_tag = True
+        elif TRAP_TO_HOST in action_info:
+            action.cmd.trap_to_host = True
+        else:
+            self.log.info('Invalid-action-field', action_info=action_info)
+            return
+        return action
+
+    def is_eap_enabled(self, intf_id, onu_id, uni_id):
+        flows = self.logical_flows_proxy.get('/').items
+
+        for flow in flows:
+            eap_flow = False
+            eap_intf_id = None
+            eap_onu_id = None
+            eap_uni_id = None
+            for field in fd.get_ofb_fields(flow):
+                if field.type == fd.ETH_TYPE:
+                    if field.eth_type == EAP_ETH_TYPE:
+                        eap_flow = True
+                if field.type == fd.IN_PORT:
+                    eap_intf_id = self.platform.intf_id_from_uni_port_num(
+                        field.port)
+                    eap_onu_id = self.platform.onu_id_from_port_num(field.port)
+                    eap_uni_id = self.platform.uni_id_from_port_num(field.port)
+
+            if eap_flow:
+                self.log.debug('eap flow detected', onu_id=onu_id, uni_id=uni_id,
+                               intf_id=intf_id, eap_intf_id=eap_intf_id,
+                               eap_onu_id=eap_onu_id,
+                               eap_uni_id=eap_uni_id)
+            if eap_flow and intf_id == eap_intf_id \
+                    and onu_id == eap_onu_id and uni_id == eap_uni_id:
+                return True, flow
+
+        return False, None
+
+    def get_subscriber_vlan(self, port):
+        self.log.debug('looking for subscriber flow for port', port=port)
+
+        flows = self.logical_flows_proxy.get('/').items
+        for flow in flows:
+            in_port = fd.get_in_port(flow)
+            out_port = fd.get_out_port(flow)
+            if in_port == port and out_port is not None and \
+                    self.platform.intf_id_to_port_type_name(out_port) \
+                    == Port.ETHERNET_NNI:
+                fields = fd.get_ofb_fields(flow)
+                self.log.debug('subscriber flow found', fields=fields)
+                for field in fields:
+                    if field.type == OFPXMT_OFB_VLAN_VID:
+                        self.log.debug('subscriber vlan found',
+                                       vlan_id=field.vlan_vid)
+                        return field.vlan_vid & 0x0fff
+        self.log.debug('No subscriber flow found', port=port)
+        return None
+
+    def add_flow_to_device(self, flow, logical_flow):
+        self.log.debug('pushing flow to device', flow=flow)
+        try:
+            self.stub.FlowAdd(flow)
+        except grpc.RpcError as grpc_e:
+            if grpc_e.code() == grpc.StatusCode.ALREADY_EXISTS:
+                self.log.warn('flow already exists', e=grpc_e, flow=flow)
+            else:
+                self.log.error('failed to add flow',
+                               logical_flow=logical_flow, flow=flow,
+                               grpc_error=grpc_e)
+            return False
+        else:
+            self.register_flow(logical_flow, flow)
+            return True
+
+    def update_flow_info_to_kv_store(self, intf_id, onu_id, uni_id, flow_id, flow):
+        self.resource_mgr.update_flow_id_info_for_uni(intf_id, onu_id, uni_id,
+                                                      flow_id, flow)
+
+    def register_flow(self, logical_flow, device_flow):
+        self.log.debug('registering flow in device',
+                       logical_flow=logical_flow, device_flow=device_flow)
+        stored_flow = copy.deepcopy(logical_flow)
+        stored_flow.id = self.generate_stored_id(device_flow.flow_id,
+                                                 device_flow.flow_type)
+        self.log.debug('generated device flow id', id=stored_flow.id,
+                       flow_id=device_flow.flow_id,
+                       direction=device_flow.flow_type)
+        stored_flow.cookie = logical_flow.id
+        flows = self.flows_proxy.get('/')
+        flows.items.extend([stored_flow])
+        self.flows_proxy.update('/', flows)
+
+    def find_next_flow(self, flow):
+        table_id = fd.get_goto_table_id(flow)
+        metadata = 0
+        # Prior to ONOS 1.13.5, Metadata contained the UNI output port number.
+        # In 1.13.5 and later, the lower 32 bits are the output port number and
+        # the upper 32 bits are the inner-vid we are looking for. Using just the
+        # lower 32 bits allows this code to work with both pre- and post-1.13.5
+        # ONOS OltPipeline versions.
+
+        for field in fd.get_ofb_fields(flow):
+            if field.type == fd.METADATA:
+                metadata = field.table_metadata & 0xFFFFFFFF
+        if table_id is None:
+            return None
+        flows = self.logical_flows_proxy.get('/').items
+        next_flows = []
+        for f in flows:
+            if f.table_id == table_id:
+                # FIXME
+                if fd.get_in_port(f) == fd.get_in_port(flow) and \
+                        fd.get_out_port(f) == metadata:
+                    next_flows.append(f)
+
+        if len(next_flows) == 0:
+            self.log.warning('no next flow found, it may be a timing issue',
+                             flow=flow, number_of_flows=len(flows))
+            if flow.id in self.retry_add_flow_list:
+                self.log.debug('flow is already in retry list', flow_id=flow.id)
+            else:
+                self.retry_add_flow_list.append(flow.id)
+                reactor.callLater(5, self.retry_add_flow, flow)
+            return None
+
+        next_flows.sort(key=lambda f: f.priority, reverse=True)
+
+        return next_flows[0]
+
+    def update_children_flows(self, device_rules_map):
+
+        for device_id, (flows, groups) in device_rules_map.iteritems():
+            if device_id != self.device_id:
+                self.root_proxy.update('/devices/{}/flows'.format(device_id),
+                                       Flows(items=flows.values()))
+                self.root_proxy.update('/devices/{}/flow_groups'.format(
+                    device_id), FlowGroups(items=groups.values()))
+
+    def clear_flows_and_scheduler_for_logical_port(self, child_device, logical_port):
+        ofp_port_name = logical_port.ofp_port.name
+        port_no = logical_port.ofp_port.port_no
+        pon_port = child_device.proxy_address.channel_id
+        onu_id = child_device.proxy_address.onu_id
+        uni_id = self.platform.uni_id_from_port_num(port_no)
+
+        # TODO: The DEFAULT_TECH_PROFILE_ID is assumed. Right way to do,
+        # is probably to maintain a list of Tech-profile table IDs associated
+        # with the UNI logical_port. This way, when the logical port is deleted,
+        # all the associated tech-profile configuration with the UNI logical_port
+        # can be cleared.
+        tech_profile_instance = self.tech_profile[pon_port]. \
+            get_tech_profile_instance(
+            DEFAULT_TECH_PROFILE_TABLE_ID,
+            ofp_port_name)
+        flow_ids = self.resource_mgr.get_current_flow_ids_for_uni(pon_port, onu_id, uni_id)
+        self.log.debug("outstanding-flows-to-be-cleared", flow_ids=flow_ids)
+        for flow_id in flow_ids:
+            flow_infos = self.resource_mgr.get_flow_id_info(pon_port, onu_id, uni_id, flow_id)
+            for flow_info in flow_infos:
+                direction = flow_info['flow_type']
+                flow_to_remove = openolt_pb2.Flow(flow_id=flow_id,
+                                                  flow_type=direction)
+                try:
+                    self.stub.FlowRemove(flow_to_remove)
+                except grpc.RpcError as grpc_e:
+                    if grpc_e.code() == grpc.StatusCode.NOT_FOUND:
+                        self.log.debug('This flow does not exist on the switch, '
+                                       'normal after an OLT reboot',
+                                       flow=flow_to_remove)
+                    else:
+                        raise grpc_e
+
+                self.resource_mgr.free_flow_id_for_uni(pon_port, onu_id, uni_id, flow_id)
+
+        try:
+            tconts = self.tech_profile[pon_port].get_tconts(tech_profile_instance)
+            self.stub.RemoveTconts(openolt_pb2.Tconts(intf_id=pon_port,
+                                                      onu_id=onu_id,
+                                                      uni_id=uni_id,
+                                                      port_no=port_no,
+                                                      tconts=tconts))
+        except grpc.RpcError as grpc_e:
+            self.log.error('error-removing-tcont-scheduler-queues',
+                           err=grpc_e)
+
+    def generate_stored_id(self, flow_id, direction):
+        if direction == UPSTREAM:
+            self.log.debug('upstream flow, shifting id')
+            return 0x1 << 15 | flow_id
+        elif direction == DOWNSTREAM:
+            self.log.debug('downstream flow, not shifting id')
+            return flow_id
+        else:
+            self.log.warn('Unrecognized direction', direction=direction)
+            return flow_id
+
+    def decode_stored_id(self, id):
+        if id >> 15 == 0x1:
+            return id & 0x7fff, UPSTREAM
+        else:
+            return id, DOWNSTREAM
+
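+    # Illustrative round trip (assumed values): for an upstream device flow id
+    # 0x12, generate_stored_id(0x12, UPSTREAM) yields 0x8012 (bit 15 set), and
+    # decode_stored_id(0x8012) recovers (0x12, UPSTREAM); downstream ids pass
+    # through unchanged.
+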
+    def _populate_tech_profile_per_pon_port(self):
+        for arange in self.resource_mgr.device_info.ranges:
+            for intf_id in arange.intf_ids:
+                self.tech_profile[intf_id] = \
+                    self.resource_mgr.resource_mgrs[intf_id].tech_profile
+
+        # Make sure we have as many tech_profiles as there are pon ports on
+        # the device
+        assert len(self.tech_profile) == self.resource_mgr.device_info.pon_ports
+
+    def _get_flow_info_as_json_blob(self, flow, flow_store_cookie,
+                                    flow_category=None):
+        json_blob = MessageToDict(message=flow,
+                                  preserving_proto_field_name=True)
+        self.log.debug("flow-info", json_blob=json_blob)
+        json_blob['flow_store_cookie'] = flow_store_cookie
+        if flow_category is not None:
+            json_blob['flow_category'] = flow_category
+        flow_info = self.resource_mgr.get_flow_id_info(flow.access_intf_id,
+                                                       flow.onu_id, flow.uni_id, flow.flow_id)
+
+        if flow_info is None:
+            flow_info = list()
+            flow_info.append(json_blob)
+        else:
+            assert (isinstance(flow_info, list))
+            flow_info.append(json_blob)
+
+        return flow_info
+
+    @staticmethod
+    def _get_flow_store_cookie(classifier, gem_port=None):
+        assert isinstance(classifier, dict)
+        # We need unique flows per gem_port
+        if gem_port is not None:
+            to_hash = dumps(classifier, sort_keys=True) + str(gem_port)
+        else:
+            to_hash = dumps(classifier, sort_keys=True)
+        return hashlib.md5(to_hash).hexdigest()[:12]
+
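+    # Illustrative example (assumed input): for classifier {'eth_type': 34958}
+    # and gem_port 1024, the string hashed is '{"eth_type": 34958}1024', and
+    # the cookie is the first 12 hex digits of its md5 digest -- stable per
+    # classifier/gem_port pair.
+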
+    def get_nni_intf_id(self):
+        if self.nni_intf_id is not None:
+            return self.nni_intf_id
+
+        port_list = self.adapter_agent.get_ports(self.device_id, Port.ETHERNET_NNI)
+        logical_port = self.adapter_agent.get_logical_port(self.logical_device_id,
+                                                           port_list[0].label)
+        self.nni_intf_id = self.platform.intf_id_from_nni_port_num(logical_port.ofp_port.port_no)
+        self.log.debug("nni-intf-d ", nni_intf_id=self.nni_intf_id)
+        return self.nni_intf_id
diff --git a/python/adapters/openolt/openolt_platform.py b/python/adapters/openolt/openolt_platform.py
new file mode 100644
index 0000000..a44eafc
--- /dev/null
+++ b/python/adapters/openolt/openolt_platform.py
@@ -0,0 +1,164 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from python.protos.device_pb2 import Port
+import python.protos.device_pb2 as dev_pb2
+
+"""
+Encoding of identifiers
+=======================
+
+Flow id
+
+    Identifies a flow within a single OLT
+    Flow Id is unique per OLT
+    Multiple GEM ports can map to same flow id
+
+     13    11              4      0
+    +--------+--------------+------+
+    | pon id |    onu id    | Flow |
+    |        |              | idx  |
+    +--------+--------------+------+
+
+    14 bits = 16384 flows (per OLT).
+
+    pon id = 4 bits = 16 PON ports
+    onu id = 7 bits = 128 ONUs per PON port
+    Flow index = 3 bits = 4 bi-directional flows per ONU
+                        = 8 uni-directional flows per ONU
+
+
+Logical (OF) UNI port number
+
+    OpenFlow port number corresponding to PON UNI
+
+     15       11              4      0
+    +--+--------+--------------+------+
+    |0 | pon id |    onu id    |   0  |
+    +--+--------+--------------+------+
+
+    pon id = 4 bits = 16 PON ports
+    onu id = 7 bits = 128 ONUs per PON port
+
+Logical (OF) NNI port number
+
+    OpenFlow port number corresponding to the NNI interface
+
+     16                             0
+    +--+----------------------------+
+    |1 |                    intf_id |
+    +--+----------------------------+
+
+    No overlap with UNI port number space
+
+
+PON OLT (OF) port number
+
+    OpenFlow port number corresponding to PON OLT ports
+
+     31    28                                 0
+    +--------+------------------------~~~------+
+    |  0x2   |          pon intf id            |
+    +--------+------------------------~~~------+
+
+"""
+
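+# Worked example of the encodings above (illustrative, assumed values):
+# for PON 2, ONU 5, UNI 1 the UNI port number is
+#     2 << 11 | 5 << 4 | 1 == 0x1051
+# and the fields are recovered with
+#     (0x1051 >> 11) & 0xF == 2    # pon id
+#     (0x1051 >> 4) & 0x7F == 5    # onu id
+#     0x1051 & 0xF == 1            # uni id
+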
+class OpenOltPlatform(object):
+    MAX_PONS_PER_OLT = 16
+    MAX_ONUS_PER_PON = 32
+    MAX_UNIS_PER_ONU = 16
+
+    def __init__(self, log, resource_mgr):
+        self.log = log
+        self.resource_mgr = resource_mgr
+
+    def mk_uni_port_num(self, intf_id, onu_id, uni_id):
+        assert intf_id < OpenOltPlatform.MAX_PONS_PER_OLT
+        assert onu_id < OpenOltPlatform.MAX_ONUS_PER_PON
+        assert uni_id < OpenOltPlatform.MAX_UNIS_PER_ONU
+        self.resource_mgr.assert_uni_id_limit(intf_id, onu_id, uni_id)
+        return intf_id << 11 | onu_id << 4 | uni_id
+
+    #def mk_flow_id(self, intf_id, onu_id, idx):
+    #    return intf_id << 9 | onu_id << 4 | idx
+
+    def uni_id_from_port_num(self, port_num):
+        return port_num & 0xF
+
+    def onu_id_from_port_num(self, port_num):
+        return (port_num >> 4) & 0x7F
+
+
+    def intf_id_from_uni_port_num(self, port_num):
+        return (port_num >> 11) & 0xF
+
+
+    def intf_id_from_pon_port_no(self, port_no):
+        return port_no & 0xF
+
+
+    def intf_id_to_port_no(self, intf_id, intf_type):
+        if intf_type is Port.ETHERNET_NNI:
+            return (0x1 << 16) | intf_id
+        elif intf_type is Port.PON_OLT:
+            return 0x2 << 28 | intf_id
+        else:
+            raise Exception('Invalid port type')
+
+
+    def intf_id_from_nni_port_num(self, port_num):
+        return port_num & 0xFFFF
+
+
+    def intf_id_to_port_type_name(self, intf_id):
+        if (2 << 28 ^ intf_id) < 16:
+            return Port.PON_OLT
+        elif intf_id & (0x1 << 16) == (0x1 << 16):
+            return Port.ETHERNET_NNI
+        else:
+            return Port.ETHERNET_UNI
+
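+    # Illustrative mapping (assumed values): 0x20000001 -> PON_OLT (0x2 in the
+    # upper nibble), 0x10000 -> ETHERNET_NNI (bit 16 set), and a UNI port
+    # number such as 0x1051 -> ETHERNET_UNI.
+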
+    def port_type_name_by_port_index(self, port_index):
+        try:
+            return dev_pb2._PORT_PORTTYPE.values_by_number[port_index].name
+        except Exception as err:
+            raise Exception(err)
+
+    def extract_access_from_flow(self, in_port, out_port):
+        if self.is_upstream(out_port):
+            return (in_port,
+                    self.intf_id_from_uni_port_num(in_port),
+                    self.onu_id_from_port_num(in_port),
+                    self.uni_id_from_port_num(in_port))
+        else:
+            return (out_port,
+                    self.intf_id_from_uni_port_num(out_port),
+                    self.onu_id_from_port_num(out_port),
+                    self.uni_id_from_port_num(out_port))
+
+    def is_upstream(self, out_port):
+
+        if out_port in [0xfffd, 0xfffffffd]:
+            # To Controller
+            return True
+        if (out_port & (0x1 << 16)) == (0x1 << 16):
+            # NNI interface
+            return True
+
+        return False
+    #
+    #def max_onus_per_pon(self):
+    #    return OpenOltPlatform.MAX_ONUS_PER_PON
diff --git a/python/adapters/openolt/openolt_resource_manager.py b/python/adapters/openolt/openolt_resource_manager.py
new file mode 100644
index 0000000..760471d
--- /dev/null
+++ b/python/adapters/openolt/openolt_resource_manager.py
@@ -0,0 +1,435 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import structlog
+
+from common.pon_resource_manager.resource_manager import PONResourceManager
+from python.common.utils.registry import registry
+from voltha.core.config.config_backend import ConsulStore
+from voltha.core.config.config_backend import EtcdStore
+from python.adapters.openolt.openolt_flow_mgr import *
+
+from python.protos import openolt_pb2
+from python.adapters.openolt.openolt_platform import OpenOltPlatform
+
+
+class OpenOltResourceMgr(object):
+    BASE_PATH_KV_STORE = "service/voltha/openolt/{}"  # service/voltha/openolt/<device_id>
+
+    def __init__(self, device_id, host_and_port, extra_args, device_info):
+        self.log = structlog.get_logger(id=device_id,
+                                        ip=host_and_port)
+        self.device_id = device_id
+        self.host_and_port = host_and_port
+        self.extra_args = extra_args
+        self.device_info = device_info
+        self.args = registry('main').get_args()
+
+        # KV store's IP Address and PORT
+        if self.args.backend == 'etcd':
+            host, port = self.args.etcd.split(':', 1)
+            self.kv_store = EtcdStore(host, port,
+                                      OpenOltResourceMgr.BASE_PATH_KV_STORE.format(device_id))
+        elif self.args.backend == 'consul':
+            host, port = self.args.consul.split(':', 1)
+            self.kv_store = ConsulStore(host, port,
+                                        OpenOltResourceMgr.BASE_PATH_KV_STORE.format(device_id))
+        else:
+            self.log.error('Invalid-backend')
+            raise Exception("Invalid-backend-for-kv-store")
+
+        ranges = dict()
+        resource_mgrs_by_tech = dict()
+        self.resource_mgrs = dict()
+
+        # If a legacy driver returns protobuf without any ranges, synthesize one from
+        # the legacy global per-device information. This, in theory, is temporary until
+        # the legacy drivers are upgraded to support pool ranges.
+        if len(self.device_info.ranges) == 0:
+            arange = self.device_info.ranges.add()
+            arange.technology = self.device_info.technology
+            arange.intf_ids.extend(range(0, device_info.pon_ports))
+
+            pool = arange.pools.add()
+            pool.type = openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.ONU_ID
+            pool.start = self.device_info.onu_id_start
+            pool.end = self.device_info.onu_id_end
+            pool.sharing = openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.DEDICATED_PER_INTF
+
+            pool = arange.pools.add()
+            pool.type = openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.ALLOC_ID
+            pool.start = self.device_info.alloc_id_start
+            pool.end = self.device_info.alloc_id_end
+            pool.sharing = openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.SHARED_BY_ALL_INTF_ALL_TECH
+
+            pool = arange.pools.add()
+            pool.type = openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.GEMPORT_ID
+            pool.start = self.device_info.gemport_id_start
+            pool.end = self.device_info.gemport_id_end
+            pool.sharing = openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.SHARED_BY_ALL_INTF_ALL_TECH
+
+            pool = arange.pools.add()
+            pool.type = openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.FLOW_ID
+            pool.start = self.device_info.flow_id_start
+            pool.end = self.device_info.flow_id_end
+            pool.sharing = openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.SHARED_BY_ALL_INTF_ALL_TECH
+
+        # Create a separate Resource Manager instance for each range. This assumes that
+        # each technology is represented by only a single range
+        global_resource_mgr = None
+        for arange in self.device_info.ranges:
+            technology = arange.technology
+            self.log.info("device-info", technology=technology)
+            ranges[technology] = arange
+            extra_args = self.extra_args + ' ' + PONResourceManager.OLT_MODEL_ARG + ' {}'.format(self.device_info.model)
+            resource_mgr = PONResourceManager(technology,
+                                              extra_args, self.device_id, self.args.backend, host, port)
+            resource_mgrs_by_tech[technology] = resource_mgr
+            if global_resource_mgr is None:
+                global_resource_mgr = resource_mgr
+            for intf_id in arange.intf_ids:
+                self.resource_mgrs[intf_id] = resource_mgrs_by_tech[technology]
+            self.initialize_device_resource_range_and_pool(resource_mgr, global_resource_mgr, arange)
+
+        # After we have initialized resource ranges, initialize the
+        # resource pools accordingly.
+        for technology, resource_mgr in resource_mgrs_by_tech.iteritems():
+            resource_mgr.init_device_resource_pool()
+
+    def __del__(self):
+        self.log.info("clearing-device-resource-pool")
+        for key, resource_mgr in self.resource_mgrs.iteritems():
+            resource_mgr.clear_device_resource_pool()
+
+    def assert_pon_id_limit(self, pon_intf_id):
+        assert pon_intf_id in self.resource_mgrs
+
+    def assert_onu_id_limit(self, pon_intf_id, onu_id):
+        self.assert_pon_id_limit(pon_intf_id)
+        self.resource_mgrs[pon_intf_id].assert_resource_limits(onu_id, PONResourceManager.ONU_ID)
+
+    @property
+    def max_uni_id_per_onu(self):
+        return 0  # Single UNI (zero-based). Use OpenOltPlatform.MAX_UNIS_PER_ONU - 1, or override, to make multi-UNI the default.
+
+    def assert_uni_id_limit(self, pon_intf_id, onu_id, uni_id):
+        self.assert_onu_id_limit(pon_intf_id, onu_id)
+        self.resource_mgrs[pon_intf_id].assert_resource_limits(uni_id, PONResourceManager.UNI_ID)
+
+    def get_onu_id(self, pon_intf_id):
+        onu_id = self.resource_mgrs[pon_intf_id].get_resource_id(
+            pon_intf_id, PONResourceManager.ONU_ID, 1)
+
+        if onu_id is not None:
+            pon_intf_onu_id = (pon_intf_id, onu_id)
+            self.resource_mgrs[pon_intf_id].init_resource_map(
+                pon_intf_onu_id)
+
+        return onu_id
+
+    def get_flow_id(self, pon_intf_id, onu_id, uni_id, flow_store_cookie,
+                    flow_category=None):
+        pon_intf_onu_id = (pon_intf_id, onu_id, uni_id)
+        try:
+            flow_ids = self.resource_mgrs[pon_intf_id]. \
+                get_current_flow_ids_for_onu(pon_intf_onu_id)
+            if flow_ids is not None:
+                for flow_id in flow_ids:
+                    flows = self.get_flow_id_info(pon_intf_id, onu_id, uni_id, flow_id)
+                    assert (isinstance(flows, list))
+                    for flow in flows:
+
+                        if flow_category is not None and \
+                                'flow_category' in flow and \
+                                flow['flow_category'] == flow_category:
+                            return flow_id
+                        if flow['flow_store_cookie'] == flow_store_cookie:
+                            return flow_id
+        except Exception as e:
+            self.log.error("error-retrieving-flow-info", e=e)
+
+        flow_id = self.resource_mgrs[pon_intf_id].get_resource_id(
+            pon_intf_onu_id[0], PONResourceManager.FLOW_ID)
+        if flow_id is not None:
+            self.resource_mgrs[pon_intf_id].update_flow_id_for_onu(
+                pon_intf_onu_id, flow_id
+            )
+
+        return flow_id
+
+    def get_flow_id_info(self, pon_intf_id, onu_id, uni_id, flow_id):
+        pon_intf_onu_id = (pon_intf_id, onu_id, uni_id)
+        return self.resource_mgrs[pon_intf_id].get_flow_id_info(pon_intf_onu_id, flow_id)
+
+    def get_current_flow_ids_for_uni(self, pon_intf_id, onu_id, uni_id):
+        pon_intf_onu_id = (pon_intf_id, onu_id, uni_id)
+        return self.resource_mgrs[pon_intf_id].get_current_flow_ids_for_onu(pon_intf_onu_id)
+
+    def update_flow_id_info_for_uni(self, pon_intf_id, onu_id, uni_id, flow_id, flow_data):
+        pon_intf_onu_id = (pon_intf_id, onu_id, uni_id)
+        return self.resource_mgrs[pon_intf_id].update_flow_id_info_for_onu(
+            pon_intf_onu_id, flow_id, flow_data)
+
+    def get_alloc_id(self, pon_intf_onu_id):
+        # Derive the pon_intf from the pon_intf_onu_id tuple
+        pon_intf = pon_intf_onu_id[0]
+        alloc_id_list = self.resource_mgrs[pon_intf].get_current_alloc_ids_for_onu(
+            pon_intf_onu_id)
+
+        if alloc_id_list and len(alloc_id_list) > 0:
+            # Since we support only one alloc_id for the ONU at the moment,
+            # return the first alloc_id in the list, if available, for that
+            # ONU.
+            return alloc_id_list[0]
+
+        alloc_id = self.resource_mgrs[pon_intf].get_resource_id(
+            pon_intf_id=pon_intf,
+            resource_type=PONResourceManager.ALLOC_ID,
+            num_of_id=1
+        )
+        if alloc_id is None:
+            self.log.error("no-alloc-id-available")
+            return None
+
+        # update the resource map on KV store with the list of alloc_id
+        # allocated for the pon_intf_onu_id tuple
+        self.resource_mgrs[pon_intf].update_alloc_ids_for_onu(pon_intf_onu_id,
+                                                              list(alloc_id))
+
+        return alloc_id
+
+    def get_current_gemport_ids_for_onu(self, pon_intf_onu_id):
+        pon_intf_id = pon_intf_onu_id[0]
+        return self.resource_mgrs[pon_intf_id].get_current_gemport_ids_for_onu(pon_intf_onu_id)
+
+    def get_current_alloc_ids_for_onu(self, pon_intf_onu_id):
+        pon_intf_id = pon_intf_onu_id[0]
+        alloc_ids = self.resource_mgrs[pon_intf_id].get_current_alloc_ids_for_onu(pon_intf_onu_id)
+        if alloc_ids is None:
+            return None
+        # We support only one tcont at the moment
+        return alloc_ids[0]
+
+    def update_gemports_ponport_to_onu_map_on_kv_store(self, gemport_list, pon_port, onu_id, uni_id):
+        for gemport in gemport_list:
+            pon_intf_gemport = (pon_port, gemport)
+            # This information is used when packet_indication is received and
+            # we need to derive the ONU Id for which the packet arrived based
+            # on the pon_intf and gemport available in the packet_indication
+            self.kv_store[str(pon_intf_gemport)] = ' '.join(map(str, (onu_id, uni_id)))
+
+    def get_onu_uni_from_ponport_gemport(self, pon_port, gemport):
+        pon_intf_gemport = (pon_port, gemport)
+        return tuple(map(int, self.kv_store[str(pon_intf_gemport)].split(' ')))
+
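+    # Illustrative KV round trip (assumed values): for pon_port=0, gemport=1024,
+    # onu_id=5 and uni_id=0, the store holds kv_store['(0, 1024)'] = '5 0', and
+    # get_onu_uni_from_ponport_gemport(0, 1024) returns (5, 0).
+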
+    def get_gemport_id(self, pon_intf_onu_id, num_of_id=1):
+        assert False, 'unused function'
+        # Derive the pon_intf and onu_id from the pon_intf_onu_id tuple
+        pon_intf = pon_intf_onu_id[0]
+        onu_id = pon_intf_onu_id[1]
+        uni_id = pon_intf_onu_id[2]
+
+        gemport_id_list = self.resource_mgrs[pon_intf].get_current_gemport_ids_for_onu(
+            pon_intf_onu_id)
+        if gemport_id_list and len(gemport_id_list) > 0:
+            return gemport_id_list
+
+        gemport_id_list = self.resource_mgrs[pon_intf].get_resource_id(
+            pon_intf_id=pon_intf,
+            resource_type=PONResourceManager.GEMPORT_ID,
+            num_of_id=num_of_id
+        )
+
+        if not gemport_id_list:
+            self.log.error("no-gemport-id-available")
+            return None
+
+        # update the resource map on KV store with the list of gemport_id
+        # allocated for the pon_intf_onu_id tuple
+        self.resource_mgrs[pon_intf].update_gemport_ids_for_onu(pon_intf_onu_id,
+                                                                gemport_id_list)
+
+        self.update_gemports_ponport_to_onu_map_on_kv_store(gemport_id_list,
+                                                            pon_intf, onu_id, uni_id)
+        return gemport_id_list
+
+    def free_onu_id(self, pon_intf_id, onu_id):
+        _ = self.resource_mgrs[pon_intf_id].free_resource_id(
+            pon_intf_id, PONResourceManager.ONU_ID, onu_id)
+
+        pon_intf_onu_id = (pon_intf_id, onu_id)
+        self.resource_mgrs[pon_intf_id].remove_resource_map(
+            pon_intf_onu_id)
+
+    def free_flow_id_for_uni(self, pon_intf_id, onu_id, uni_id, flow_id):
+        self.resource_mgrs[pon_intf_id].free_resource_id(
+            pon_intf_id, PONResourceManager.FLOW_ID, flow_id)
+        pon_intf_onu_id = (pon_intf_id, onu_id, uni_id)
+        self.resource_mgrs[pon_intf_id].update_flow_id_for_onu(pon_intf_onu_id,
+                                                               flow_id, False)
+        self.resource_mgrs[pon_intf_id].remove_flow_id_info(pon_intf_onu_id,
+                                                            flow_id)
+
+    def free_pon_resources_for_onu(self, pon_intf_id_onu_id):
+
+        pon_intf_id = pon_intf_id_onu_id[0]
+        onu_id = pon_intf_id_onu_id[1]
+        alloc_ids = \
+            self.resource_mgrs[pon_intf_id].get_current_alloc_ids_for_onu(pon_intf_id_onu_id)
+        self.resource_mgrs[pon_intf_id].free_resource_id(pon_intf_id,
+                                                         PONResourceManager.ALLOC_ID,
+                                                         alloc_ids)
+
+        gemport_ids = \
+            self.resource_mgrs[pon_intf_id].get_current_gemport_ids_for_onu(pon_intf_id_onu_id)
+        self.resource_mgrs[pon_intf_id].free_resource_id(pon_intf_id,
+                                                         PONResourceManager.GEMPORT_ID,
+                                                         gemport_ids)
+
+        flow_ids = \
+            self.resource_mgrs[pon_intf_id].get_current_flow_ids_for_onu(pon_intf_id_onu_id)
+        self.resource_mgrs[pon_intf_id].free_resource_id(pon_intf_id,
+                                                         PONResourceManager.FLOW_ID,
+                                                         flow_ids)
+
+        self.resource_mgrs[pon_intf_id].free_resource_id(pon_intf_id,
+                                                         PONResourceManager.ONU_ID,
+                                                         onu_id)
+
+        # Clear resource map associated with (pon_intf_id, gemport_id) tuple.
+        self.resource_mgrs[pon_intf_id].remove_resource_map(pon_intf_id_onu_id)
+
+        # Clear the ONU Id associated with the (pon_intf_id, gemport_id) tuple.
+        for gemport_id in gemport_ids:
+            del self.kv_store[str((pon_intf_id, gemport_id))]
+
+    def initialize_device_resource_range_and_pool(self, resource_mgr, global_resource_mgr, arange):
+        self.log.info("resource-range-pool-init", technology=resource_mgr.technology)
+
+        # first load from KV profiles
+        status = resource_mgr.init_resource_ranges_from_kv_store()
+        if not status:
+            self.log.info("failed-to-load-resource-range-from-kv-store", technology=resource_mgr.technology)
+
+        # Then apply device specific information. If KV doesn't exist
+        # or is broader than the device, the device's information will
+        # dictate the range limits.
+        self.log.info("using-device-info-to-init-pon-resource-ranges", technology=resource_mgr.technology)
+
+        onu_id_start = self.device_info.onu_id_start
+        onu_id_end = self.device_info.onu_id_end
+        onu_id_shared = openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.DEDICATED_PER_INTF
+        onu_id_shared_pool_id = None
+        alloc_id_start = self.device_info.alloc_id_start
+        alloc_id_end = self.device_info.alloc_id_end
+        alloc_id_shared = openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.SHARED_BY_ALL_INTF_ALL_TECH  # TODO EdgeCore/BAL limitation
+        alloc_id_shared_pool_id = None
+        gemport_id_start = self.device_info.gemport_id_start
+        gemport_id_end = self.device_info.gemport_id_end
+        gemport_id_shared = openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.SHARED_BY_ALL_INTF_ALL_TECH  # TODO EdgeCore/BAL limitation
+        gemport_id_shared_pool_id = None
+        flow_id_start = self.device_info.flow_id_start
+        flow_id_end = self.device_info.flow_id_end
+        flow_id_shared = openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.SHARED_BY_ALL_INTF_ALL_TECH  # TODO EdgeCore/BAL limitation
+        flow_id_shared_pool_id = None
+
+        global_pool_id = 0
+        # The first interface id in the range serves as the pool id for pools
+        # shared by all interfaces of the same technology.
+        first_intf_pool_id = arange.intf_ids[0]
+
+        for pool in arange.pools:
+            if pool.sharing == openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.SHARED_BY_ALL_INTF_ALL_TECH:
+                shared_pool_id = global_pool_id
+            elif pool.sharing == openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.SHARED_BY_ALL_INTF_SAME_TECH:
+                shared_pool_id = first_intf_pool_id
+            else:
+                shared_pool_id = None
+
+            if pool.type == openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.ONU_ID:
+                onu_id_start = pool.start
+                onu_id_end = pool.end
+                onu_id_shared = pool.sharing
+                onu_id_shared_pool_id = shared_pool_id
+            elif pool.type == openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.ALLOC_ID:
+                alloc_id_start = pool.start
+                alloc_id_end = pool.end
+                alloc_id_shared = pool.sharing
+                alloc_id_shared_pool_id = shared_pool_id
+            elif pool.type == openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.GEMPORT_ID:
+                gemport_id_start = pool.start
+                gemport_id_end = pool.end
+                gemport_id_shared = pool.sharing
+                gemport_id_shared_pool_id = shared_pool_id
+            elif pool.type == openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.FLOW_ID:
+                flow_id_start = pool.start
+                flow_id_end = pool.end
+                flow_id_shared = pool.sharing
+                flow_id_shared_pool_id = shared_pool_id
+
+        self.log.info("device-info-init", technology=arange.technology,
+                      onu_id_start=onu_id_start, onu_id_end=onu_id_end, onu_id_shared_pool_id=onu_id_shared_pool_id,
+                      alloc_id_start=alloc_id_start, alloc_id_end=alloc_id_end,
+                      alloc_id_shared_pool_id=alloc_id_shared_pool_id,
+                      gemport_id_start=gemport_id_start, gemport_id_end=gemport_id_end,
+                      gemport_id_shared_pool_id=gemport_id_shared_pool_id,
+                      flow_id_start_idx=flow_id_start,
+                      flow_id_end_idx=flow_id_end,
+                      flow_id_shared_pool_id=flow_id_shared_pool_id,
+                      intf_ids=arange.intf_ids,
+                      uni_id_start_idx=0,
+                      uni_id_end_idx=self.max_uni_id_per_onu)
+
+        resource_mgr.init_default_pon_resource_ranges(
+            onu_id_start_idx=onu_id_start,
+            onu_id_end_idx=onu_id_end,
+            onu_id_shared_pool_id=onu_id_shared_pool_id,
+            alloc_id_start_idx=alloc_id_start,
+            alloc_id_end_idx=alloc_id_end,
+            alloc_id_shared_pool_id=alloc_id_shared_pool_id,
+            gemport_id_start_idx=gemport_id_start,
+            gemport_id_end_idx=gemport_id_end,
+            gemport_id_shared_pool_id=gemport_id_shared_pool_id,
+            flow_id_start_idx=flow_id_start,
+            flow_id_end_idx=flow_id_end,
+            flow_id_shared_pool_id=flow_id_shared_pool_id,
+            uni_id_start_idx=0, uni_id_end_idx=self.max_uni_id_per_onu,
+            num_of_pon_ports=self.device_info.pon_ports,
+            intf_ids=arange.intf_ids
+        )
+
+        # For global sharing, make sure to refresh both local and global resource manager instances' range
+        if global_resource_mgr is not resource_mgr:
+            if onu_id_shared == openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.SHARED_BY_ALL_INTF_ALL_TECH:
+                global_resource_mgr.update_ranges(onu_id_start_idx=onu_id_start, onu_id_end_idx=onu_id_end)
+                resource_mgr.update_ranges(onu_id_start_idx=onu_id_start, onu_id_end_idx=onu_id_end,
+                                           onu_id_shared_resource_mgr=global_resource_mgr)
+
+            if alloc_id_shared == openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.SHARED_BY_ALL_INTF_ALL_TECH:
+                global_resource_mgr.update_ranges(alloc_id_start_idx=alloc_id_start, alloc_id_end_idx=alloc_id_end)
+                resource_mgr.update_ranges(alloc_id_start_idx=alloc_id_start, alloc_id_end_idx=alloc_id_end,
+                                           alloc_id_shared_resource_mgr=global_resource_mgr)
+
+            if gemport_id_shared == openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.SHARED_BY_ALL_INTF_ALL_TECH:
+                global_resource_mgr.update_ranges(gemport_id_start_idx=gemport_id_start,
+                                                  gemport_id_end_idx=gemport_id_end)
+                resource_mgr.update_ranges(gemport_id_start_idx=gemport_id_start, gemport_id_end_idx=gemport_id_end,
+                                           gemport_id_shared_resource_mgr=global_resource_mgr)
+
+            if flow_id_shared == openolt_pb2.DeviceInfo.DeviceResourceRanges.Pool.SHARED_BY_ALL_INTF_ALL_TECH:
+                global_resource_mgr.update_ranges(flow_id_start_idx=flow_id_start,
+                                                  flow_id_end_idx=flow_id_end)
+                resource_mgr.update_ranges(flow_id_start_idx=flow_id_start, flow_id_end_idx=flow_id_end,
+                                           flow_id_shared_resource_mgr=global_resource_mgr)
+
+        # Make sure loaded range fits the platform bit encoding ranges
+        resource_mgr.update_ranges(uni_id_start_idx=0, uni_id_end_idx=OpenOltPlatform.MAX_UNIS_PER_ONU-1)
diff --git a/python/adapters/openolt/openolt_statistics.py b/python/adapters/openolt/openolt_statistics.py
new file mode 100644
index 0000000..bfdb61c
--- /dev/null
+++ b/python/adapters/openolt/openolt_statistics.py
@@ -0,0 +1,606 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# from voltha.protos.events_pb2 import KpiEvent, MetricValuePairs
+# from voltha.protos.events_pb2 import KpiEventType
+
+# from voltha.adapters.openolt.nni_port import NniPort
+# from voltha.adapters.openolt.pon_port import PonPort
+# from voltha.protos.device_pb2 import Port
+
+from twisted.internet import reactor, defer
+from voltha.extensions.kpi.olt.olt_pm_metrics import OltPmMetrics
+from python.protos.device_pb2 import PmConfig, PmConfigs, PmGroupConfig, Port
+
+
+class OpenOltStatisticsMgr(object):
+    def __init__(self, openolt_device, log, platform, **kargs):
+
+        """
+        kargs are used to pass debugging flags at this time.
+        :param openolt_device:
+        :param log:
+        :param kargs:
+        """
+        self.device = openolt_device
+        self.log = log
+        self.platform = platform
+        # Northbound and Southbound ports
+        # added to initialize the pm_metrics
+        self.northbound_ports = self.init_ports(type="nni")
+        self.southbound_ports = self.init_ports(type='pon')
+
+        self.pm_metrics = None
+        # The following can be used to allow a standalone test routine to start
+        # the metrics independently
+        self.metrics_init = kargs.pop("metrics_init", True)
+        if self.metrics_init:
+            self.init_pm_metrics()
+
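+    # Hypothetical standalone-test usage of the metrics_init flag handled in
+    # __init__ above (illustrative, not part of the adapter):
+    #     mgr = OpenOltStatisticsMgr(device, log, platform, metrics_init=False)
+    #     mgr.init_pm_metrics()  # started manually by the test harness
+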
+    def init_pm_metrics(self):
+        # Setup PM configuration for this device
+        if self.pm_metrics is None:
+            try:
+                self.device.reason = 'setting up Performance Monitoring configuration'
+                kwargs = {
+                    'nni-ports': self.northbound_ports.values(),
+                    'pon-ports': self.southbound_ports.values()
+                }
+                self.pm_metrics = OltPmMetrics(self.device.adapter_agent, self.device.device_id,
+                                               self.device.logical_device_id,
+                                               grouped=True, freq_override=False,
+                                               **kwargs)
+                """
+                    override the default naming structures in the OltPmMetrics class.
+                    This is being done until the protos can be modified in the BAL driver
+
+                """
+                self.pm_metrics.nni_pm_names = (self.get_openolt_port_pm_names())['nni_pm_names']
+                self.pm_metrics.nni_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                                 for (m, t) in self.pm_metrics.nni_pm_names}
+
+                self.pm_metrics.pon_pm_names = (self.get_openolt_port_pm_names())['pon_pm_names']
+                self.pm_metrics.pon_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                                 for (m, t) in self.pm_metrics.pon_pm_names}
+                pm_config = self.pm_metrics.make_proto()
+                self.log.info("initial-pm-config", pm_config=pm_config)
+                self.device.adapter_agent.update_device_pm_config(pm_config, init=True)
+                # Start collecting stats from the device after a brief pause
+                reactor.callLater(10, self.pm_metrics.start_collector)
+            except Exception as e:
+                self.log.exception('pm-setup', e=e)
+
+    def port_statistics_indication(self, port_stats):
+        # self.log.info('port-stats-collected', stats=port_stats)
+        self.ports_statistics_kpis(port_stats)
+        #FIXME: etcd problem, do not update objects for now
+
+        #
+        #
+        # #FIXME : only the first uplink is a logical port
+        # if platform.intf_id_to_port_type_name(port_stats.intf_id) ==
+        #   Port.ETHERNET_NNI:
+        #     # ONOS update
+        #     self.update_logical_port_stats(port_stats)
+        # # update port object stats
+        # port = self.device.adapter_agent.get_port(self.device.device_id,
+        #     port_no=port_stats.intf_id)
+        #
+        # if port is None:
+        #     self.log.warn('port associated with this stats does not exist')
+        #     return
+        #
+        # port.rx_packets = port_stats.rx_packets
+        # port.rx_bytes = port_stats.rx_bytes
+        # port.rx_errors = port_stats.rx_error_packets
+        # port.tx_packets = port_stats.tx_packets
+        # port.tx_bytes = port_stats.tx_bytes
+        # port.tx_errors = port_stats.tx_error_packets
+        #
+        # # Add port does an update if port exists
+        # self.device.adapter_agent.add_port(self.device.device_id, port)
+
+    def flow_statistics_indication(self, flow_stats):
+        self.log.info('flow-stats-collected', stats=flow_stats)
+        # TODO: send to kafka ?
+        # FIXME: etcd problem, do not update objects for now
+        # # UNTESTED : the openolt driver does not yet provide flow stats
+        # self.device.adapter_agent.update_flow_stats(
+        #       self.device.logical_device_id,
+        #       flow_id=flow_stats.flow_id, packet_count=flow_stats.tx_packets,
+        #       byte_count=flow_stats.tx_bytes)
+
+    def ports_statistics_kpis(self, port_stats):
+        """
+        Map the port stats values into a dictionary,
+        create a KpiEvent and publish it to Kafka.
+
+        :param port_stats:
+        :return:
+        """
+
+        try:
+            intf_id = port_stats.intf_id
+
+            if self.platform.intf_id_to_port_no(0, Port.ETHERNET_NNI) < intf_id < \
+                    self.platform.intf_id_to_port_no(4, Port.ETHERNET_NNI):
+                """
+                For this release we are only interested in the first NNI for
+                northbound; we are not using the other three.
+                """
+                return
+            else:
+
+                pm_data = {}
+                pm_data["rx_bytes"] = port_stats.rx_bytes
+                pm_data["rx_packets"] = port_stats.rx_packets
+                pm_data["rx_ucast_packets"] = port_stats.rx_ucast_packets
+                pm_data["rx_mcast_packets"] = port_stats.rx_mcast_packets
+                pm_data["rx_bcast_packets"] = port_stats.rx_bcast_packets
+                pm_data["rx_error_packets"] = port_stats.rx_error_packets
+                pm_data["tx_bytes"] = port_stats.tx_bytes
+                pm_data["tx_packets"] = port_stats.tx_packets
+                pm_data["tx_ucast_packets"] = port_stats.tx_ucast_packets
+                pm_data["tx_mcast_packets"] = port_stats.tx_mcast_packets
+                pm_data["tx_bcast_packets"] = port_stats.tx_bcast_packets
+                pm_data["tx_error_packets"] = port_stats.tx_error_packets
+                pm_data["rx_crc_errors"] = port_stats.rx_crc_errors
+                pm_data["bip_errors"] = port_stats.bip_errors
+
+                pm_data["intf_id"] = intf_id
+
+                """
+                   Based upon the intf_id map to an nni port or a pon port
+                    the intf_id is the key to the north or south bound collections
+
+                    Based upon the intf_id the port object (nni_port or pon_port) will
+                    have its data attr. updated by the current dataset collected.
+
+                    For prefixing the rule is currently to use the port number and not the intf_id
+
+                """
+                #FIXME : Just use first NNI for now
+                if intf_id == self.platform.intf_id_to_port_no(0,
+                                                          Port.ETHERNET_NNI):
+                    #NNI port (just the first one)
+                    self.update_port_object_kpi_data(
+                        port_object=self.northbound_ports[port_stats.intf_id], datadict=pm_data)
+                else:
+                    #PON ports
+                    self.update_port_object_kpi_data(
+                        port_object=self.southbound_ports[port_stats.intf_id],datadict=pm_data)
+        except Exception as err:
+            self.log.exception("Error publishing kpi statistics. ", errmessage=err)
+
+    def update_logical_port_stats(self, port_stats):
+        try:
+            label = 'nni-{}'.format(port_stats.intf_id)
+            logical_port = self.device.adapter_agent.get_logical_port(
+                self.device.logical_device_id, label)
+        except KeyError as e:
+            self.log.warn('logical port was not found, it may not have been '
+                          'created yet', exception=e)
+            return
+
+        if logical_port is None:
+            self.log.error('logical-port-is-None',
+                logical_device_id=self.device.logical_device_id, label=label,
+                port_stats=port_stats)
+            return
+
+        logical_port.ofp_port_stats.rx_packets = port_stats.rx_packets
+        logical_port.ofp_port_stats.rx_bytes = port_stats.rx_bytes
+        logical_port.ofp_port_stats.tx_packets = port_stats.tx_packets
+        logical_port.ofp_port_stats.tx_bytes = port_stats.tx_bytes
+        logical_port.ofp_port_stats.rx_errors = port_stats.rx_error_packets
+        logical_port.ofp_port_stats.tx_errors = port_stats.tx_error_packets
+        logical_port.ofp_port_stats.rx_crc_err = port_stats.rx_crc_errors
+
+        self.log.debug('after-stats-update', port=logical_port)
+
+        self.device.adapter_agent.update_logical_port(
+            self.device.logical_device_id, logical_port)
+
+    """
+    The following 4 methods customer naming, the generation of the port objects, building of those
+    objects and populating new data.   The pm metrics operate on the value that are contained in the Port objects.
+    This class updates those port objects with the current data from the grpc indication and
+    post the data on a fixed interval.
+
+    """
+    def get_openolt_port_pm_names(self):
+        """
+        This collects a dictionary of the custom port names
+        used by the openolt.
+
+        Some of these are the same as the PM names used by the olt_pm_metrics
+        class; if the sets are the same then there is no need to call this
+        method. However, when custom names are used in the protos, the
+        specific names should be pushed into the olt_pm_metrics class.
+
+        :return:
+        """
+        nni_pm_names = {
+            ('intf_id', PmConfig.CONTEXT),  # Physical device interface ID/Port number
+
+            ('admin_state', PmConfig.STATE),
+            ('oper_status', PmConfig.STATE),
+            ('port_no', PmConfig.GAUGE),
+            ('rx_bytes', PmConfig.COUNTER),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_ucast_packets', PmConfig.COUNTER),
+            ('rx_mcast_packets', PmConfig.COUNTER),
+            ('rx_bcast_packets', PmConfig.COUNTER),
+            ('rx_error_packets', PmConfig.COUNTER),
+            ('tx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_ucast_packets', PmConfig.COUNTER),
+            ('tx_mcast_packets', PmConfig.COUNTER),
+            ('tx_bcast_packets', PmConfig.COUNTER),
+            ('tx_error_packets', PmConfig.COUNTER)
+        }
+        nni_pm_names_from_kpi_extension = {
+            ('intf_id', PmConfig.CONTEXT),  # Physical device interface ID/Port number
+
+            ('admin_state', PmConfig.STATE),
+            ('oper_status', PmConfig.STATE),
+
+            ('rx_bytes', PmConfig.COUNTER),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_ucast_packets', PmConfig.COUNTER),
+            ('rx_mcast_packets', PmConfig.COUNTER),
+            ('rx_bcast_packets', PmConfig.COUNTER),
+            ('rx_error_packets', PmConfig.COUNTER),
+
+            ('tx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_ucast_packets', PmConfig.COUNTER),
+            ('tx_mcast_packets', PmConfig.COUNTER),
+            ('tx_bcast_packets', PmConfig.COUNTER),
+            ('tx_error_packets', PmConfig.COUNTER),
+            ('rx_crc_errors', PmConfig.COUNTER),
+            ('bip_errors', PmConfig.COUNTER),
+        }
+
+        # pon_pm_names uses the same structure as nni_pm_names with the addition of pon_id to the context
+        pon_pm_names = {
+            ('pon_id', PmConfig.CONTEXT),  # PON ID (0..n)
+            ('port_no', PmConfig.CONTEXT),
+
+            ('admin_state', PmConfig.STATE),
+            ('oper_status', PmConfig.STATE),
+            ('rx_bytes', PmConfig.COUNTER),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_ucast_packets', PmConfig.COUNTER),
+            ('rx_mcast_packets', PmConfig.COUNTER),
+            ('rx_bcast_packets', PmConfig.COUNTER),
+            ('rx_error_packets', PmConfig.COUNTER),
+            ('tx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_ucast_packets', PmConfig.COUNTER),
+            ('tx_mcast_packets', PmConfig.COUNTER),
+            ('tx_bcast_packets', PmConfig.COUNTER),
+            ('tx_error_packets', PmConfig.COUNTER)
+        }
+        pon_pm_names_from_kpi_extension = {
+            ('intf_id', PmConfig.CONTEXT),        # Physical device port number (PON)
+            ('pon_id', PmConfig.CONTEXT),         # PON ID (0..n)
+
+            ('admin_state', PmConfig.STATE),
+            ('oper_status', PmConfig.STATE),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_bytes', PmConfig.COUNTER),
+            ('tx_bip_errors', PmConfig.COUNTER),
+            ('in_service_onus', PmConfig.GAUGE),
+            ('closest_onu_distance', PmConfig.GAUGE)
+        }
+        onu_pm_names = {
+            ('intf_id', PmConfig.CONTEXT),        # Physical device port number (PON)
+            ('pon_id', PmConfig.CONTEXT),
+            ('onu_id', PmConfig.CONTEXT),
+
+            ('fiber_length', PmConfig.GAUGE),
+            ('equalization_delay', PmConfig.GAUGE),
+            ('rssi', PmConfig.GAUGE),
+        }
+        gem_pm_names = {
+            ('intf_id', PmConfig.CONTEXT),        # Physical device port number (PON)
+            ('pon_id', PmConfig.CONTEXT),
+            ('onu_id', PmConfig.CONTEXT),
+            ('gem_id', PmConfig.CONTEXT),
+
+            ('alloc_id', PmConfig.GAUGE),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_bytes', PmConfig.COUNTER),
+        }
+        # Build a dict for the names.  The caller will index to the correct values
+        names_dict = {"nni_pm_names": nni_pm_names,
+                      "pon_pm_names": pon_pm_names,
+                      "pon_pm_names_orig": pon_pm_names_from_kpi_extension,
+                      "onu_pm_names": onu_pm_names,
+                      "gem_pm_names": gem_pm_names,
+
+                      }
+
+        return names_dict
+
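+    # The caller indexes the returned dict by collection name, e.g. as
+    # init_pm_metrics() does above (illustrative):
+    #     nni_names = self.get_openolt_port_pm_names()['nni_pm_names']
+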
+    def init_ports(self, device_id=12345, type="nni", log=None):
+        """
+        This method collects the port objects:  nni and pon that are updated with the
+        current data from the OLT
+
+        Both the northbound (nni) and southbound ports are indexed by the interface id (intf_id)
+        and NOT the port number. When the port object is instantiated it will contain the intf_id and
+        port_no values
+
+        :param type:
+        :param device_id:
+        :param log:
+        :return:
+        """
+        try:
+            if type == "nni":
+                nni_ports = {}
+                for i in range(0, 1):
+                    nni_port = self.build_port_object(i, type='nni')
+                    nni_ports[nni_port.intf_id] = nni_port
+                return nni_ports
+            elif type == "pon":
+                pon_ports = {}
+                for i in range(0, 16):
+                    pon_port = self.build_port_object(i, type="pon")
+                    pon_ports[pon_port.intf_id] = pon_port
+                return pon_ports
+            else:
+                self.log.exception("Unmapped port type requested = " , type=type)
+                raise Exception("Unmapped port type requested = " + type)
+
+        except Exception as err:
+            raise Exception(err)
+
+    def build_port_object(self, port_num, type="nni"):
+        """
+        Separate method to allow for updating north- and southbound ports
+        as newly discovered ports and devices appear.
+
+        :param port_num:
+        :param type:
+        :return:
+        """
+        try:
+            """
+             This builds a port object which is added to the
+             appropriate northbound or southbound values
+            """
+            if type == "nni":
+                kwargs = {
+                    'port_no': port_num,
+                    'intf_id': self.platform.intf_id_to_port_no(port_num,
+                                                           Port.ETHERNET_NNI),
+                    "device_id": self.device.device_id
+                }
+                port = NniPort(**kwargs)
+                return port
+            elif type == "pon":
+                # PON ports require a different configuration
+                #  intf_id and pon_id are currently equal.
+                kwargs = {
+                    'port_no': port_num,
+                    'intf_id':  self.platform.intf_id_to_port_no(port_num,
+                                                           Port.PON_OLT),
+                    'pon-id':  self.platform.intf_id_to_port_no(port_num,
+                                                           Port.PON_OLT),
+                    "device_id": self.device.device_id
+                }
+                port = PonPort(**kwargs)
+                return port
+
+            else:
+                self.log.exception("Unknown port type")
+                raise Exception("Unknown port type")
+
+        except Exception as err:
+            self.log.exception("Unknown port type", error=err)
+            raise Exception(err)
+
+    def update_port_object_kpi_data(self, port_object, datadict=None):
+        """
+        Take the formatted data marshalled from the indicator collector
+        and update the corresponding port attributes via hasattr/setattr.
+        An example call is sketched after this method.
+
+        :param port_object: the port object to be updated
+        :param datadict: dict of attribute name -> value
+        :return:
+        """
+
+        try:
+            datadict = datadict or {}
+            cur_attr = ""
+            if isinstance(port_object, (NniPort, PonPort)):
+                for k, v in datadict.items():
+                    cur_attr = k
+                    if hasattr(port_object, k):
+                        setattr(port_object, k, v)
+            else:
+                raise Exception("Must be either a PON or an NNI port.")
+            return
+        except Exception as err:
+            self.log.exception("Caught error updating port data",
+                               cur_attr=cur_attr, errormsg=str(err))
+            raise
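+
+    # Example KPI update using the statistic attribute names defined on
+    # the port classes below (a sketch; real values come from the OLT):
+    #
+    #     self.update_port_object_kpi_data(port, {'rx_bytes': 1024,
+    #                                             'tx_packets': 7})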
+
+
+class PonPort(object):
+    """
+    This is a highly reduced version taken from the adtran pon_port.
+    TODO: Extend for use in the openolt adapter set.
+    """
+    MAX_ONUS_SUPPORTED = 256
+    DEFAULT_ENABLED = False
+    MAX_DEPLOYMENT_RANGE = 25000  # Meters (OLT-PB maximum)
+
+    _MCAST_ONU_ID = 253
+    _MCAST_ALLOC_BASE = 0x500
+
+    _SUPPORTED_ACTIVATION_METHODS = ['autodiscovery']  # , 'autoactivate']
+    _SUPPORTED_AUTHENTICATION_METHODS = ['serial-number']
+
+    def __init__(self, **kwargs):
+        assert 'pon-id' in kwargs, 'PON ID not found'
+
+        self._pon_id = kwargs['pon-id']
+        self._device_id = kwargs['device_id']
+        self._intf_id = kwargs['intf_id']
+        self._port_no = kwargs['port_no']
+        self._port_id = 0
+        # self._name = 'xpon 0/{}'.format(self._pon_id+1)
+        self._label = 'pon-{}'.format(self._pon_id)
+
+        # __str__ references these; default them so printing a port that
+        # has not yet synced with the OLT does not raise AttributeError.
+        self._admin_state = None
+        self._oper_status = None
+        self.olt = None
+
+        self._onus = {}  # serial_number-base64 -> ONU  (allowed list)
+        self._onu_by_id = {}  # onu-id -> ONU
+
+        """
+        Statistics  taken from nni_port
+        self.intf_id = 0  #handled by getter
+        self.port_no = 0  #handled by getter
+        self.port_id = 0  #handled by getter
+
+        Note:  In the current implementation of the kpis coming from the BAL the stats are the
+        samne model for NNI and PON.
+
+        TODO:   Integrate additional kpis for the PON and other southbound port objecgts.
+
+        """
+
+        self.rx_bytes = 0
+        self.rx_packets = 0
+        self.rx_mcast_packets = 0
+        self.rx_bcast_packets = 0
+        self.rx_error_packets = 0
+        self.tx_bytes = 0
+        self.tx_packets = 0
+        self.tx_ucast_packets = 0
+        self.tx_mcast_packets = 0
+        self.tx_bcast_packets = 0
+        self.tx_error_packets = 0
+        return
+
+    def __str__(self):
+        return "PonPort-{}: Admin: {}, Oper: {}, OLT: {}".format(self._label,
+                                                                 self._admin_state,
+                                                                 self._oper_status,
+                                                                 self.olt)
+
+    @property
+    def intf_id(self):
+        return self._intf_id
+
+    @intf_id.setter
+    def intf_id(self, value):
+        self._intf_id = value
+
+    @property
+    def pon_id(self):
+        return self._pon_id
+
+    @pon_id.setter
+    def pon_id(self, value):
+        self._pon_id = value
+
+    @property
+    def port_no(self):
+        return self._port_no
+
+    @port_no.setter
+    def port_no(self, value):
+        self._port_no = value
+
+    @property
+    def port_id(self):
+        return self._port_id
+
+    @port_id.setter
+    def port_id(self, value):
+        self._port_id = value
+
+    @property
+    def onus(self):
+        """
+        Get a set of all ONUs.  While the set is immutable, do not use this method
+        to get a collection that you will iterate through that my yield the CPU
+        such as inline callback.  ONUs may be deleted at any time and they will
+        set some references to other objects to NULL during the 'delete' call.
+        Instead, get a list of ONU-IDs and iterate on these and call the 'onu'
+        method below (which will return 'None' if the ONU has been deleted.
+
+        :return: (frozenset) collection of ONU objects on this PON
+        """
+        return frozenset(self._onus.values())
+
+    @property
+    def onu_ids(self):
+        return frozenset(self._onu_by_id.keys())
+
+    def onu(self, onu_id):
+        return self._onu_by_id.get(onu_id)
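+
+    # Safe-iteration sketch per the 'onus' docstring above (illustrative
+    # only; 'pon_port' and 'handle' are assumed names):
+    #
+    #     for onu_id in pon_port.onu_ids:
+    #         onu = pon_port.onu(onu_id)
+    #         if onu is None:          # deleted while we were iterating
+    #             continue
+    #         handle(onu)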
+
+
+class NniPort(object):
+    """
+    Northbound network port, often Ethernet-based
+
+    This is a highly reduced version taken from the adtran nni_port code set
+    TODO:   add functions to allow for port specific values and operations
+
+    """
+    def __init__(self, **kwargs):
+        # TODO: Extend for use in the openolt adapter set.
+        self.port_no = kwargs.get('port_no')
+        self._port_no = self.port_no
+        self._name = kwargs.get('name', 'nni-{}'.format(self._port_no))
+        self._logical_port = None
+
+        # __str__ references these; default them so printing a port that
+        # has not yet synced with the OLT does not raise AttributeError.
+        self._admin_state = None
+        self._oper_status = None
+        self._parent = None
+
+        # Statistics
+        self.intf_id = kwargs.pop('intf_id', None)
+        self.rx_bytes = 0
+        self.rx_packets = 0
+        self.rx_mcast_packets = 0
+        self.rx_bcast_packets = 0
+        self.rx_error_packets = 0
+        self.tx_bytes = 0
+        self.tx_packets = 0
+        self.tx_ucast_packets = 0
+        self.tx_mcast_packets = 0
+        self.tx_bcast_packets = 0
+        self.tx_error_packets = 0
+        return
+
+    def __str__(self):
+        return "NniPort-{}: Admin: {}, Oper: {}, parent: {}".format(self._port_no,
+                                                                    self._admin_state,
+                                                                    self._oper_status,
+                                                                    self._parent)