VOL-1278 Adding bbsim_id for generating a unique datapath-id
Update the bbsimolt adapter to support reboot

Change-Id: Ifdb53ff857868f063596c42b93ec67d428d19e8a
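
For context, a minimal standalone sketch (not part of the change itself; make_dp_id is a hypothetical helper name) of how an incrementing bbsim_id can be turned into a unique datapath-id suffix. Zero-padded hex formatting avoids the edge case where hex(n)[-2:] would pick up the 'x' of the '0x' prefix for ids below 16; the change below starts the counter at 17, so its slice works for the ids it actually produces.

    # Illustrative sketch only -- make_dp_id is a hypothetical helper.
    def make_dp_id(bbsim_id):
        # Zero-padded hex keeps the suffix two characters wide for any id in
        # 0-255, unlike hex(bbsim_id)[-2:], which yields 'xa' for id 10.
        return '00:00:00:00:00:{:02x}'.format(bbsim_id & 0xff)

    assert make_dp_id(18) == '00:00:00:00:00:12'
    assert make_dp_id(9) == '00:00:00:00:00:09'
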
diff --git a/voltha/adapters/bbsimolt/bbsimolt.py b/voltha/adapters/bbsimolt/bbsimolt.py
index be07996..313531d 100644
--- a/voltha/adapters/bbsimolt/bbsimolt.py
+++ b/voltha/adapters/bbsimolt/bbsimolt.py
@@ -25,11 +25,10 @@
 from voltha.protos.adapter_pb2 import Adapter
 from voltha.protos.common_pb2 import LogLevel
 from voltha.adapters.openolt.openolt import OpenoltAdapter, OpenOltDefaults
-from voltha.adapters.openolt.openolt_resource_manager import OpenOltResourceMgr
-from voltha.adapters.openolt.openolt_flow_mgr import OpenOltFlowMgr
-from voltha.adapters.openolt.openolt_statistics import OpenOltStatisticsMgr
-from voltha.adapters.openolt.openolt_alarms import OpenOltAlarmMgr
-from voltha.adapters.openolt.openolt_bw import OpenOltBW
+from voltha.adapters.bbsimolt.bbsimolt_flow_mgr import BBSimOltFlowMgr
+from voltha.adapters.bbsimolt.bbsimolt_statistics import BBSimOltStatisticsMgr
+from voltha.adapters.bbsimolt.bbsimolt_bw import BBSimOltBW
+from voltha.adapters.bbsimolt.bbsimolt_alarms import BBSimOltAlarmMgr
 from voltha.adapters.bbsimolt.bbsimolt_platform import BBSimOltPlatform
 from voltha.adapters.bbsimolt.bbsimolt_device import BBSimOltDevice
 
@@ -57,25 +56,37 @@
             version='0.1',
             config=AdapterConfig(log_level=LogLevel.INFO)
         )
+        self.bbsim_id = 17  # TODO: make this hard-coded base id configurable
 
     def adopt_device(self, device):
+        self.bbsim_id += 1
         log.info('adopt-device', device=device)
 
         support_classes = deepcopy(OpenOltDefaults)['support_classes']
 
         # Customize platform
         support_classes['platform'] = BBSimOltPlatform
+        support_classes['flow_mgr'] = BBSimOltFlowMgr
+        support_classes['alarm_mgr'] = BBSimOltAlarmMgr
+        support_classes['stats_mgr'] = BBSimOltStatisticsMgr
+        support_classes['bw_mgr'] = BBSimOltBW
         kwargs = {
             'support_classes': support_classes,
             'adapter_agent': self.adapter_agent,
             'device': device,
-            'device_num': self.num_devices + 1
+            'device_num': self.num_devices + 1,
+            'dp_id': '00:00:00:00:00:' + hex(self.bbsim_id)[-2:]
         }
         try:
             self.devices[device.id] = BBSimOltDevice(**kwargs)
         except Exception as e:
-            log.error('Failed to adopt OpenOLT device', error=e)
+            log.error('Failed to adopt BBSimOLT device', error=e)
             del self.devices[device.id]
             raise
         else:
             self.num_devices += 1
+
+    def delete_device(self, device):
+        self.bbsim_id -= 1
+        super(BBSimOltAdapter, self).delete_device(device)
+        self.num_devices -= 1
\ No newline at end of file
diff --git a/voltha/adapters/bbsimolt/bbsimolt_alarms.py b/voltha/adapters/bbsimolt/bbsimolt_alarms.py
new file mode 100644
index 0000000..66672f9
--- /dev/null
+++ b/voltha/adapters/bbsimolt/bbsimolt_alarms.py
@@ -0,0 +1,467 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import arrow
+import voltha.adapters.openolt.openolt_platform as platform
+# from voltha.protos.device_pb2 import Port
+from voltha.extensions.alarms.adapter_alarms import AdapterAlarms
+from voltha.extensions.alarms.simulator.simulate_alarms import AdapterAlarmSimulator
+from voltha.extensions.alarms.olt.olt_los_alarm import OltLosAlarm
+from voltha.extensions.alarms.onu.onu_dying_gasp_alarm import OnuDyingGaspAlarm
+from voltha.extensions.alarms.onu.onu_los_alarm import OnuLosAlarm
+from voltha.extensions.alarms.onu.onu_lopc_miss_alarm import OnuLopcMissAlarm
+from voltha.extensions.alarms.onu.onu_lopc_mic_error_alarm import OnuLopcMicErrorAlarm
+from voltha.extensions.alarms.onu.onu_lob_alarm import OnuLobAlarm
+from voltha.adapters.bbsimolt.bbsimolt_platform import BBSimOltPlatform
+
+from voltha.extensions.alarms.onu.onu_startup_alarm import OnuStartupAlarm
+from voltha.extensions.alarms.onu.onu_signal_degrade_alarm import OnuSignalDegradeAlarm
+from voltha.extensions.alarms.onu.onu_signal_fail_alarm import OnuSignalFailAlarm
+from voltha.extensions.alarms.onu.onu_window_drift_alarm import OnuWindowDriftAlarm
+from voltha.extensions.alarms.onu.onu_activation_fail_alarm import OnuActivationFailAlarm
+
+import voltha.protos.device_pb2 as device_pb2
+
+
+class BBSimOltAlarmMgr(object):
+    def __init__(self, log, adapter_agent, device_id, logical_device_id):
+        """
+        20180711 - Addition of adapter_agent and device_id
+            to facilitate alarm processing and kafka posting
+        :param log:
+        :param adapter_agent:
+        :param device_id:
+        :param logical_device_id:
+        """
+        self.log = log
+        self.platform = BBSimOltPlatform(log)
+        self.adapter_agent = adapter_agent
+        self.device_id = device_id
+        self.logical_device_id = logical_device_id
+        """
+        The following is added to reduce the continual posting of OLT LOS
+        alarms to Kafka. Set enable_alarm_suppress = True to enable
+        suppression; otherwise the current openolt BAL sends continuous
+        OLT LOS alarm cleared messages. The ONU disc raised counter is a
+        placeholder for a future addition.
+        """
+        self.enable_alarm_suppress = True
+        self.alarm_suppress = {"olt_los_clear": 0, "onu_disc_raised": []}  # Keep count of alarms to limit.
+        try:
+            self.alarms = AdapterAlarms(self.adapter_agent, self.device_id, self.logical_device_id)
+            self.simulator = AdapterAlarmSimulator(self.alarms)
+        except Exception as initerr:
+            self.log.exception("alarmhandler-init-error", errmsg=initerr.message)
+            raise Exception(initerr)
+
+    def process_alarms(self, alarm_ind):
+        try:
+            self.log.debug('alarm-indication', alarm=alarm_ind, device_id=self.device_id)
+            if alarm_ind.HasField('los_ind'):
+                self.los_indication(alarm_ind.los_ind)
+            elif alarm_ind.HasField('dying_gasp_ind'):
+                self.dying_gasp_indication(alarm_ind.dying_gasp_ind)
+            elif alarm_ind.HasField('onu_alarm_ind'):
+                self.onu_alarm_indication(alarm_ind.onu_alarm_ind)
+            elif alarm_ind.HasField('onu_startup_fail_ind'):
+                self.onu_startup_failure_indication(
+                    alarm_ind.onu_startup_fail_ind)
+            elif alarm_ind.HasField('onu_signal_degrade_ind'):
+                self.onu_signal_degrade_indication(
+                    alarm_ind.onu_signal_degrade_ind)
+            elif alarm_ind.HasField('onu_drift_of_window_ind'):
+                self.onu_drift_of_window_indication(
+                    alarm_ind.onu_drift_of_window_ind)
+            elif alarm_ind.HasField('onu_loss_omci_ind'):
+                self.onu_loss_omci_indication(alarm_ind.onu_loss_omci_ind)
+            elif alarm_ind.HasField('onu_signals_fail_ind'):
+                self.onu_signals_failure_indication(
+                    alarm_ind.onu_signals_fail_ind)
+            elif alarm_ind.HasField('onu_tiwi_ind'):
+                self.onu_transmission_interference_warning(
+                    alarm_ind.onu_tiwi_ind)
+            elif alarm_ind.HasField('onu_activation_fail_ind'):
+                self.onu_activation_failure_indication(
+                    alarm_ind.onu_activation_fail_ind)
+            elif alarm_ind.HasField('onu_processing_error_ind'):
+                self.onu_processing_error_indication(
+                    alarm_ind.onu_processing_error_ind)
+            else:
+                self.log.warn('unknown alarm type', alarm=alarm_ind)
+
+        except Exception as e:
+            self.log.error('sorting of alarm went wrong', error=e,
+                           alarm=alarm_ind)
+
+    def simulate_alarm(self, alarm):
+        self.simulator.simulate_alarm(alarm)
+
+    def los_indication(self, los_ind):
+
+        try:
+            self.log.debug('los indication received', los_ind=los_ind,
+                           int_id=los_ind.intf_id, status=los_ind.status)
+            try:
+                port_type_name = self.platform.intf_id_to_port_type_name(los_ind.intf_id)
+                if los_ind.status == 1 or los_ind.status == "on":
+                    # Zero out the suppression counter on OLT_LOS raise
+                    self.alarm_suppress['olt_los_clear'] = 0
+                    OltLosAlarm(self.alarms, intf_id=los_ind.intf_id, port_type_name=port_type_name).raise_alarm()
+                else:
+                    """
+                        Check if there has been more than one LOS clear following a previous LOS
+                    """
+                    if self.alarm_suppress['olt_los_clear'] == 0 and self.enable_alarm_suppress:
+                        OltLosAlarm(self.alarms, intf_id=los_ind.intf_id, port_type_name=port_type_name).clear_alarm()
+                        self.alarm_suppress['olt_los_clear'] += 1
+
+            except Exception as alarm_err:
+                self.log.error('los-indication', errmsg=alarm_err.message)
+        except Exception as e:
+            self.log.error('los-indication', errmsg=e.message)
+
+    def dying_gasp_indication(self, dying_gasp_ind):
+        try:
+            alarm_dgi = dying_gasp_ind
+            onu_id = alarm_dgi.onu_id
+            self.log.debug('openolt-alarmindication-dispatch-dying-gasp', int_id=alarm_dgi.intf_id,
+                           onu_id=alarm_dgi.onu_id, status=alarm_dgi.status)
+            try:
+                """
+                Get the specific ONU device information for the ONU generating
+                the alarm and extract its id. In the future, extract the
+                serial number as well.
+                """
+                onu_device_id = "unresolved"
+                onu_serial_number = "unresolved"
+                onu_device = self.resolve_onu_id(onu_id, port_intf_id=alarm_dgi.intf_id)
+                if onu_device is not None:
+                    onu_device_id = onu_device.id
+                    onu_serial_number = onu_device.serial_number
+
+                if dying_gasp_ind.status == 1 or dying_gasp_ind.status == "on":
+                    OnuDyingGaspAlarm(self.alarms, dying_gasp_ind.intf_id,
+                                      onu_device_id).raise_alarm()
+                else:
+                    OnuDyingGaspAlarm(self.alarms, dying_gasp_ind.intf_id,
+                                      onu_device_id).clear_alarm()
+            except Exception as alarm_err:
+                self.log.exception('dying-gasp-indication', errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.error('dying_gasp_indication', error=e)
+
+    def onu_alarm_indication(self, onu_alarm_ind):
+        """
+        LOB = Loss of burst
+        LOPC = Loss of PLOAM channel
+
+        :param onu_alarm_ind:  Alarm indication which currently contains
+            onu_id:
+            los_status:
+            lob_status:
+            lopc_miss_status:
+            lopc_mic_error_status:
+        :return:
+        """
+        self.log.info('onu-alarm-indication')
+
+        try:
+            self.log.debug('onu alarm indication received', los_status=onu_alarm_ind.los_status,
+                           onu_intf_id=onu_alarm_ind.onu_id,
+                           lob_status=onu_alarm_ind.lob_status,
+                           lopc_miss_status=onu_alarm_ind.lopc_miss_status,
+                           lopc_mic_error_status=onu_alarm_ind.lopc_mic_error_status,
+                           intf_id=onu_alarm_ind.intf_id
+                           )
+
+            try:
+                """
+                    Get the specific ONU device information for the ONU
+                    generating the alarm and extract its id. In the future,
+                    extract the serial number as well.
+                """
+                onu_device_id = "unresolved"
+                serial_number = "unresolved"
+                onu_device = self.resolve_onu_id(onu_alarm_ind.onu_id,  port_intf_id=onu_alarm_ind.intf_id)
+                if onu_device is not None:
+                    onu_device_id = onu_device.id
+                    serial_number = onu_device.serial_number
+
+                if onu_alarm_ind.los_status == 1 or onu_alarm_ind.los_status == "on":
+                    OnuLosAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).raise_alarm()
+                elif onu_alarm_ind.los_status == 0 or onu_alarm_ind.los_status == "off":
+                    OnuLosAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).clear_alarm()
+                else:     # No Change
+                    pass
+
+                if onu_alarm_ind.lopc_miss_status == 1 or onu_alarm_ind.lopc_miss_status == "on":
+                    OnuLopcMissAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).raise_alarm()
+                elif onu_alarm_ind.lopc_miss_status == 0 or onu_alarm_ind.lopc_miss_status == "off":
+                    OnuLopcMissAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).clear_alarm()
+                else:     # No Change
+                    pass
+
+                if onu_alarm_ind.lopc_mic_error_status == 1 or onu_alarm_ind.lopc_mic_error_status == "on":
+                    OnuLopcMicErrorAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).raise_alarm()
+                elif onu_alarm_ind.lopc_mic_error_status == 0 or onu_alarm_ind.lopc_mic_error_status == "off":
+                    OnuLopcMicErrorAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).clear_alarm()
+                else:     # No Change
+                    pass
+
+                if onu_alarm_ind.lob_status == 1 or onu_alarm_ind.lob_status == "on":
+                    OnuLobAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).raise_alarm()
+                elif onu_alarm_ind.lob_status == 0 or onu_alarm_ind.lob_status == "off":
+                    OnuLobAlarm(self.alarms, onu_id=onu_device_id, intf_id=onu_alarm_ind.intf_id).clear_alarm()
+                else:     # No Change
+                    pass
+            except Exception as alarm_err:
+                self.log.exception('onu-alarm-indication', errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.exception('onu-alarm-indication', errmsg=e.message)
+
+    def onu_startup_failure_indication(self, onu_startup_fail_ind):
+        """
+        Current protobuf indicator:
+        message OnuStartupFailureIndication {
+                fixed32 intf_id = 1;
+                fixed32 onu_id = 2;
+                string status = 3;
+            }
+
+        :param onu_startup_fail_ind:
+        :return:
+        """
+        try:
+            ind = onu_startup_fail_ind
+            label = "onu-startup-failure-indication"
+            self.log.debug(label + " received", onu_startup_fail_ind=ind, int_id=ind.intf_id, onu_id=ind.onu_id, status=ind.status)
+            try:
+                if ind.status == 1 or ind.status == "on":
+                    OnuStartupAlarm(self.alarms, intf_id=ind.intf_id, onu_id=ind.onu_id).raise_alarm()
+                else:
+                    OnuStartupAlarm(self.alarms, intf_id=ind.intf_id, onu_id=ind.onu_id).clear_alarm()
+            except Exception as alarm_err:
+                self.log.exception(label, errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.exception(label, errmsg=e.message)
+
+    def onu_signal_degrade_indication(self, onu_signal_degrade_ind):
+        """
+        Current protobuf indicator:
+        OnuSignalDegradeIndication {
+            fixed32 intf_id = 1;
+            fixed32 onu_id = 2;
+            string status = 3;
+            fixed32 inverse_bit_error_rate = 4;
+        }
+        :param onu_signal_degrade_ind:
+        :return:
+        """
+        try:
+            ind = onu_signal_degrade_ind
+            label = "onu-signal-degrade-indication"
+            self.log.debug(label + ' received',
+                           onu_signal_degrade_ind=ind,
+                           int_id=ind.intf_id,
+                           onu_id=ind.onu_id,
+                           inverse_bit_error_rate=ind.inverse_bit_error_rate,
+                           status=ind.status)
+            try:
+                if ind.status == 1 or ind.status == "on":
+                    OnuSignalDegradeAlarm(self.alarms, intf_id=ind.intf_id, onu_id=ind.onu_id,
+                                          inverse_bit_error_rate=ind.inverse_bit_error_rate).raise_alarm()
+                else:
+                    OnuSignalDegradeAlarm(self.alarms, intf_id=ind.intf_id, onu_id=ind.onu_id,
+                                          inverse_bit_error_rate=ind.inverse_bit_error_rate).clear_alarm()
+            except Exception as alarm_err:
+                self.log.exception(label, errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.exception(label, errmsg=e.message)
+
+    def onu_drift_of_window_indication(self, onu_drift_of_window_ind):
+        """
+        Current protobuf indicator:
+        OnuDriftOfWindowIndication {
+            fixed32 intf_id = 1;
+            fixed32 onu_id = 2;
+            string status = 3;
+            fixed32 drift = 4;
+            fixed32 new_eqd = 5;
+        }
+
+        :param onu_drift_of_window_ind:
+        :return:
+        """
+        try:
+            ind = onu_drift_of_window_ind
+            label = "onu-window-drift-indication"
+
+            onu_device_id, onu_serial_number = self.resolve_onudev_id_onudev_serialnum(
+                self.resolve_onu_id(ind.onu_id, port_intf_id=ind.intf_id))
+
+            self.log.debug(label + ' received',
+                           onu_drift_of_window_ind=ind,
+                           int_id=ind.intf_id,
+                           onu_id=ind.onu_id,
+                           onu_device_id=onu_device_id,
+                           drift=ind.drift,
+                           new_eqd=ind.new_eqd,
+                           status=ind.status)
+            try:
+                if ind.status == 1 or ind.status == "on":
+                    OnuWindowDriftAlarm(self.alarms, intf_id=ind.intf_id,
+                           onu_id=onu_device_id,
+                           drift=ind.drift,
+                           new_eqd=ind.new_eqd).raise_alarm()
+                else:
+                    OnuWindowDriftAlarm(self.alarms, intf_id=ind.intf_id,
+                           onu_id=onu_device_id,
+                           drift=ind.drift,
+                           new_eqd=ind.new_eqd).clear_alarm()
+            except Exception as alarm_err:
+                self.log.exception(label, errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.exception(label, errmsg=e.message)
+
+    def onu_loss_omci_indication(self, onu_loss_omci_ind):
+        self.log.info('not implemented yet')
+
+    def onu_signals_failure_indication(self, onu_signals_fail_ind):
+        """
+        Current protobuf indicator:
+        OnuSignalsFailureIndication {
+            fixed32 intf_id = 1;
+            fixed32 onu_id = 2;
+            string status = 3;
+            fixed32 inverse_bit_error_rate = 4;
+        }
+
+        :param onu_signals_fail_ind:
+        :return:
+        """
+        try:
+            ind = onu_signals_fail_ind
+            label = "onu-signal-failure-indication"
+
+            onu_device_id, onu_serial_number = self.resolve_onudev_id_onudev_serialnum(
+                self.resolve_onu_id(ind.onu_id, port_intf_id=ind.intf_id))
+
+            self.log.debug(label + ' received',
+                           onu_signals_fail_ind=ind,
+                           int_id=ind.intf_id,
+                           onu_id=ind.onu_id,
+                           onu_device_id=onu_device_id,
+                           onu_serial_number=onu_serial_number,
+                           inverse_bit_error_rate=ind.inverse_bit_error_rate,
+                           status=ind.status)
+            try:
+                if ind.status == 1 or ind.status == "on":
+                    OnuSignalFailAlarm(self.alarms, intf_id=ind.intf_id,
+                           onu_id=onu_device_id,
+                           inverse_bit_error_rate=ind.inverse_bit_error_rate).raise_alarm()
+                else:
+                    OnuSignalFailAlarm(self.alarms, intf_id=ind.intf_id,
+                           onu_id=onu_device_id,
+                           inverse_bit_error_rate=ind.inverse_bit_error_rate).clear_alarm()
+            except Exception as alarm_err:
+                self.log.exception(label, errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.exception(label, errmsg=e.message)
+
+
+    def onu_transmission_interference_warning(self, onu_tiwi_ind):
+        self.log.info('not implemented yet')
+
+    def onu_activation_failure_indication(self, onu_activation_fail_ind):
+        """
+        No status is currently passed with this alarm; consequently it will
+        always just raise.
+        :param onu_activation_fail_ind:
+        :return:
+        """
+        try:
+            ind = onu_activation_fail_ind
+            label = "onu-activation-failure-indication"
+
+            onu_device_id, onu_serial_number = self.resolve_onudev_id_onudev_serialnum(
+                self.resolve_onu_id(ind.onu_id, port_intf_id=ind.intf_id))
+
+            self.log.debug(label + ' received',
+                           onu_activation_fail_ind=ind,
+                           int_id=ind.intf_id,
+                           onu_id=ind.onu_id,
+                           onu_device_id=onu_device_id,
+                           onu_serial_number=onu_serial_number)
+            try:
+
+                OnuActivationFailAlarm(self.alarms, intf_id=ind.intf_id,
+                       onu_id=onu_device_id).raise_alarm()
+            except Exception as alarm_err:
+                self.log.exception(label, errmsg=alarm_err.message)
+
+        except Exception as e:
+            self.log.exception(label, errmsg=e.message)
+
+    def onu_processing_error_indication(self, onu_processing_error_ind):
+        self.log.info('not implemented yet')
+
+    """
+    Helper Methods
+    """
+
+    def resolve_onudev_id_onudev_serialnum(self, onu_device):
+        """
+        Convenience wrapper to resolve device_id and serial number
+        :param onu_device:
+        :return: tuple: onu_device_id, onu_serial_number
+        """
+        try:
+            onu_device_id = "unresolved"
+            onu_serial_number = "unresolved"
+            if onu_device is not None:
+                onu_device_id = onu_device.id
+                onu_serial_number = onu_device.serial_number
+        except Exception as err:
+            self.log.exception("openolt-alarms-resolve-onudev-id  ", errmsg=err.message)
+            raise Exception(err)
+        return onu_device_id, onu_serial_number
+
+    def resolve_onu_id(self, onu_id, port_intf_id):
+        """
+        Resolve the onu_device from the intf_id value and port, using the
+        adapter agent.
+
+        Returns None if not found. Caller will have to test for None and act accordingly.
+        :param onu_id:
+        :param port_intf_id:
+        :return:
+        """
+
+        try:
+            onu_device = None
+            onu_device = self.adapter_agent.get_child_device(
+                self.device_id,
+                parent_port_no=self.platform.intf_id_to_port_no(
+                    port_intf_id, device_pb2.Port.PON_OLT),
+                onu_id=onu_id)
+            onu_device_id = onu_device.id
+        except Exception as inner:
+            self.log.exception('resolve-onu-id', errmsg=inner.message)
+
+        return onu_device
+
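
As a standalone illustration of the LOS clear-suppression scheme described in the constructor docstring above (the structure below is a simplified assumption, not the adapter's API, and it treats enable_alarm_suppress as True): the counter is zeroed whenever LOS is raised, and only the first subsequent clear is forwarded to Kafka.

    # Simplified sketch of the OLT LOS clear-suppression pattern (illustrative only).
    suppress = {"olt_los_clear": 0}

    def handle_los(status, raise_alarm, clear_alarm):
        if status in (1, "on"):
            suppress["olt_los_clear"] = 0   # new LOS episode: re-arm the clear
            raise_alarm()
        elif suppress["olt_los_clear"] == 0:
            clear_alarm()                   # forward only the first clear
            suppress["olt_los_clear"] += 1
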
diff --git a/voltha/adapters/bbsimolt/bbsimolt_bw.py b/voltha/adapters/bbsimolt/bbsimolt_bw.py
new file mode 100644
index 0000000..c31029f
--- /dev/null
+++ b/voltha/adapters/bbsimolt/bbsimolt_bw.py
@@ -0,0 +1,41 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+DEFAULT_ONU_BW_PROFILE = "default"
+DEFAULT_ONU_PIR = 1000000  # 1Gbps
+
+
+class BBSimOltBW(object):
+
+    def __init__(self, log, proxy):
+        self.log = log
+        self.proxy = proxy
+
+    def pir(self, serial_number):
+        bw = 0
+        try:
+            bw = self.proxy.get(
+                '/traffic_descriptor_profiles/{}'.format(serial_number))
+        except KeyError:
+            self.log.debug('bandwidth not configured',
+                          serial_number=serial_number)
+            try:
+                bw = self.proxy.get('/traffic_descriptor_profiles/{}' \
+                                    .format(DEFAULT_ONU_BW_PROFILE))
+            except KeyError:
+                return DEFAULT_ONU_PIR
+
+        return bw.maximum_bandwidth
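
A hypothetical usage sketch of the lookup order implemented above (the proxy paths come from the code; the proxy object and serial number are assumptions): a per-ONU traffic descriptor profile is tried first, then the shared "default" profile, then the hard-coded DEFAULT_ONU_PIR.

    # Hypothetical usage; 'proxy' is the config proxy handed to the device.
    bw_mgr = BBSimOltBW(log, proxy)

    # Lookup order inside pir():
    #   1. /traffic_descriptor_profiles/<serial_number>   (per-ONU profile)
    #   2. /traffic_descriptor_profiles/default           (shared default profile)
    #   3. DEFAULT_ONU_PIR = 1000000                      (final fallback)
    pir = bw_mgr.pir('BBSM00000001')  # serial number is illustrative
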
diff --git a/voltha/adapters/bbsimolt/bbsimolt_device.py b/voltha/adapters/bbsimolt/bbsimolt_device.py
index bcea426..43be4e3 100644
--- a/voltha/adapters/bbsimolt/bbsimolt_device.py
+++ b/voltha/adapters/bbsimolt/bbsimolt_device.py
@@ -15,16 +15,32 @@
 #
 
 from twisted.internet import reactor
+import threading
+import binascii
+import grpc
 
-from scapy.layers.l2 import Ether
-from voltha.adapters.openolt.openolt import OpenoltDevice
-from voltha.protos.device_pb2 import Port
-from voltha.adapters.openolt.protos import openolt_pb2
+
+from scapy.layers.l2 import Ether, Dot1Q
+from voltha.adapters.bbsimolt.bbsimolt_alarms import BBSimOltAlarmMgr
+from voltha.adapters.bbsimolt.bbsimolt_statistics import BBSimOltStatisticsMgr
+from voltha.adapters.bbsimolt.bbsimolt_bw import BBSimOltBW
+from voltha.adapters.bbsimolt.bbsimolt_platform import BBSimOltPlatform
+from voltha.adapters.bbsimolt.bbsimolt_flow_mgr import BBSimOltFlowMgr
+from voltha.protos.device_pb2 import Port, Device
+from voltha.adapters.bbsimolt.protos import openolt_pb2, openolt_pb2_grpc
+from voltha.adapters.openolt.openolt_device import OpenoltDevice
 from voltha.protos.common_pb2 import ConnectStatus, OperStatus, AdminState
 from voltha.protos.bbf_fiber_tcont_body_pb2 import TcontsConfigData
 from voltha.protos.bbf_fiber_gemport_body_pb2 import GemportsConfigData
 from voltha.protos.bbf_fiber_base_pb2 import VEnetConfig
 from voltha.extensions.alarms.onu.onu_discovery_alarm import OnuDiscoveryAlarm
+from voltha.protos.openflow_13_pb2 import OFPPS_LIVE, OFPPF_FIBER, \
+    OFPPS_LINK_DOWN, OFPPF_1GB_FD, OFPC_GROUP_STATS, OFPC_PORT_STATS, \
+    OFPC_TABLE_STATS, OFPC_FLOW_STATS, ofp_switch_features, ofp_port, \
+    ofp_port_stats, ofp_desc
+from voltha.protos.logical_device_pb2 import LogicalPort
+from voltha.core.logical_device_agent import mac_str_to_tuple
+
 
 from voltha.registry import registry
 
@@ -32,6 +48,229 @@
     def __init__(self, **kwargs):
         super(BBSimOltDevice, self).__init__(**kwargs)
 
+    def do_state_init(self, event):
+        # Initialize gRPC
+        self.channel = grpc.insecure_channel(self.host_and_port)
+        self.stub = openolt_pb2_grpc.OpenoltStub(self.channel)
+        self.channel_ready_future = grpc.channel_ready_future(self.channel)
+
+        self.alarm_mgr = BBSimOltAlarmMgr(self.log, self.adapter_agent,
+                                         self.device_id,
+                                         self.logical_device_id)
+        self.stats_mgr = BBSimOltStatisticsMgr(self, self.log)
+        self.bw_mgr = BBSimOltBW(self.log, self.proxy)
+
+        self.log.info('openolt-device-created', device_id=self.device_id)
+
+    def post_init(self, event):
+        self.log.debug('post_init')
+
+        # We have reached init state, starting the indications thread
+
+        # Catch RuntimeError exception
+        try:
+            # Start indications thread
+            self.indications_thread_handle = threading.Thread(
+                target=self.indications_thread)
+            # setDaemon() is the legacy getter/setter API; the daemon
+            # attribute could be set directly instead. Running the thread as
+            # a daemon avoids the Jenkins failure "Exception in thread
+            # Thread-1 (most likely raised during interpreter shutdown)".
+            self.indications_thread_handle.setDaemon(True)
+            self.indications_thread_handle.start()
+        except Exception as e:
+            self.log.exception('post_init failed', e=e)
+
+    def do_state_connected(self, event):
+        self.log.debug("do_state_connected")
+
+        device = self.adapter_agent.get_device(self.device_id)
+
+        device_info = self.stub.GetDeviceInfo(openolt_pb2.Empty())
+        self.log.info('Device connected', device_info=device_info)
+
+        device.vendor = device_info.vendor
+        device.model = device_info.model
+        device.hardware_version = device_info.hardware_version
+        device.firmware_version = device_info.firmware_version
+        self.platform = BBSimOltPlatform(self.log)
+        self.flow_mgr = BBSimOltFlowMgr(self.log, self.stub, self.device_id,
+                                       self.logical_device_id)
+
+        # TODO: use content of device_info for Resource manager (VOL-948)
+
+        # TODO: check for uptime and reboot if too long (VOL-1192)
+
+        device.connect_status = ConnectStatus.REACHABLE
+        self.adapter_agent.update_device(device)
+
+    def do_state_up(self, event):
+        self.log.debug("do_state_up")
+
+        device = self.adapter_agent.get_device(self.device_id)
+
+        # Update phys OF device
+        device.parent_id = self.logical_device_id
+        device.oper_status = OperStatus.ACTIVE
+        self.adapter_agent.update_device(device)
+
+    def do_state_down(self, event):
+        self.log.debug("do_state_down", device=self, device_id=self.device_id)
+        oper_state = OperStatus.UNKNOWN
+        connect_state = ConnectStatus.UNREACHABLE
+
+        # Propagating to the children
+
+        # Children ports
+        child_devices = self.adapter_agent.get_child_devices(self.device_id)
+        for onu_device in child_devices:
+            uni_no = self.platform.mk_uni_port_num(
+                onu_device.proxy_address.channel_id,
+                onu_device.proxy_address.onu_id)
+            uni_name = self.port_name(uni_no, Port.ETHERNET_UNI,
+                                      serial_number=onu_device.serial_number)
+            onu_adapter_agent = \
+                registry('adapter_loader').get_agent(onu_device.adapter)
+            onu_adapter_agent.update_interface(onu_device,
+                                               {'oper_state': 'down'})
+            self.onu_ports_down(onu_device, uni_no, uni_name, oper_state)
+        # Children devices
+        self.adapter_agent.update_child_devices_state(
+            self.device_id, oper_status=oper_state,
+            connect_status=connect_state)
+        # Device Ports
+        device_ports = self.adapter_agent.get_ports(self.device_id,
+                                                    Port.ETHERNET_NNI)
+        logical_ports_ids = [port.label for port in device_ports]
+        device_ports += self.adapter_agent.get_ports(self.device_id,
+                                                     Port.PON_OLT)
+
+        for port in device_ports:
+            port.oper_status = oper_state
+            self.adapter_agent.add_port(self.device_id, port)
+
+        # Device logical port
+        for logical_port_id in logical_ports_ids:
+            logical_port = self.adapter_agent.get_logical_port(
+                self.logical_device_id, logical_port_id)
+            logical_port.ofp_port.state = OFPPS_LINK_DOWN
+            self.adapter_agent.update_logical_port(self.logical_device_id,
+                                                   logical_port)
+
+        # Device
+        device = self.adapter_agent.get_device(self.device_id)
+        device.oper_status = oper_state
+        device.connect_status = connect_state
+
+        reactor.callLater(2, self.adapter_agent.update_device, device)
+
+    # def post_up(self, event):
+    #     self.log.debug('post-up')
+    #     self.flow_mgr.reseed_flows()
+
+    def post_down(self, event):
+        self.log.debug('post_down')
+        self.flow_mgr.reset_flows()
+
+
+    def indications_thread(self):
+        self.log.debug('starting-indications-thread')
+        self.log.debug('connecting to olt', device_id=self.device_id)
+        self.channel_ready_future.result()  # blocking call
+        self.log.info('connected to olt', device_id=self.device_id)
+        self.go_state_connected()
+
+        self.indications = self.stub.EnableIndication(openolt_pb2.Empty())
+
+        while True:
+            try:
+                # get the next indication from olt
+                ind = next(self.indications)
+            except Exception as e:
+                self.log.warn('gRPC connection lost', error=e)
+                reactor.callFromThread(self.go_state_down)
+                reactor.callFromThread(self.go_state_init)
+                break
+            else:
+                self.log.debug("rx indication", indication=ind)
+
+                # indication handlers run in the main event loop
+                if ind.HasField('olt_ind'):
+                    reactor.callFromThread(self.olt_indication, ind.olt_ind)
+                elif ind.HasField('intf_ind'):
+                    reactor.callFromThread(self.intf_indication, ind.intf_ind)
+                elif ind.HasField('intf_oper_ind'):
+                    reactor.callFromThread(self.intf_oper_indication,
+                                           ind.intf_oper_ind)
+                elif ind.HasField('onu_disc_ind'):
+                    reactor.callFromThread(self.onu_discovery_indication,
+                                           ind.onu_disc_ind)
+                elif ind.HasField('onu_ind'):
+                    reactor.callFromThread(self.onu_indication, ind.onu_ind)
+                elif ind.HasField('omci_ind'):
+                    reactor.callFromThread(self.omci_indication, ind.omci_ind)
+                elif ind.HasField('pkt_ind'):
+                    reactor.callFromThread(self.packet_indication, ind.pkt_ind)
+                elif ind.HasField('port_stats'):
+                    reactor.callFromThread(
+                            self.stats_mgr.port_statistics_indication,
+                            ind.port_stats)
+                elif ind.HasField('flow_stats'):
+                    reactor.callFromThread(
+                            self.stats_mgr.flow_statistics_indication,
+                            ind.flow_stats)
+                elif ind.HasField('alarm_ind'):
+                    reactor.callFromThread(self.alarm_mgr.process_alarms,
+                                           ind.alarm_ind)
+                else:
+                    self.log.warn('unknown indication type')
+
+    def olt_indication(self, olt_indication):
+        if olt_indication.oper_state == "up":
+            self.go_state_up()
+        elif olt_indication.oper_state == "down":
+            self.go_state_down()
+
+    def intf_indication(self, intf_indication):
+        self.log.debug("intf indication", intf_id=intf_indication.intf_id,
+                       oper_state=intf_indication.oper_state)
+
+        if intf_indication.oper_state == "up":
+            oper_status = OperStatus.ACTIVE
+        else:
+            oper_status = OperStatus.DISCOVERED
+
+        # add_port updates the port if it already exists
+        self.add_port(intf_indication.intf_id, Port.PON_OLT, oper_status)
+
+    def intf_oper_indication(self, intf_oper_indication):
+        self.log.debug("Received interface oper state change indication",
+                       intf_id=intf_oper_indication.intf_id,
+                       type=intf_oper_indication.type,
+                       oper_state=intf_oper_indication.oper_state)
+
+        if intf_oper_indication.oper_state == "up":
+            oper_state = OperStatus.ACTIVE
+        else:
+            oper_state = OperStatus.DISCOVERED
+
+        if intf_oper_indication.type == "nni":
+
+            # FIXME - creating logical port for 2nd interface throws exception!
+            if intf_oper_indication.intf_id != 0:
+                return
+
+            # add_(logical_)port updates the port if it already exists
+            port_no, label = self.add_port(intf_oper_indication.intf_id,
+                                           Port.ETHERNET_NNI, oper_state)
+            self.log.debug("int_oper_indication", port_no=port_no, label=label)
+            self.add_logical_port(port_no, intf_oper_indication.intf_id,
+                                  oper_state)
+
+        elif intf_oper_indication.type == "pon":
+            # FIXME - handle PON oper state change
+            pass
+
     def onu_discovery_indication(self, onu_disc_indication):
         intf_id = onu_disc_indication.intf_id
         serial_number = onu_disc_indication.serial_number
@@ -100,21 +339,6 @@
                 self.log.warn('unexpected state', onu_id=onu_id,
                               onu_device_oper_state=onu_device.oper_status)
 
-    def packet_indication(self, pkt_indication):
-
-        self.log.debug("packet indication", intf_id=pkt_indication.intf_id,
-                       gemport_id=pkt_indication.gemport_id,
-                       flow_id=pkt_indication.flow_id)
-
-        onu_id = self.platform.onu_id_from_gemport_id(pkt_indication.gemport_id)
-        logical_port_num = self.platform.mk_uni_port_num(pkt_indication.intf_id,
-                                                    onu_id)
-
-        pkt = Ether(pkt_indication.pkt)
-        kw = dict(logical_device_id=self.logical_device_id,
-                  logical_port_no=logical_port_num)
-        self.adapter_agent.send_packet_in(packet=str(pkt), **kw)
-
     def onu_indication(self, onu_indication):
         self.log.debug("onu indication", intf_id=onu_indication.intf_id,
                        onu_id=onu_indication.onu_id,
@@ -290,6 +514,30 @@
                 reactor.callLater(12, port_config)
                 reactor.callLater(12, onu_update_oper_status)
 
+            elif onu_device.adapter == 'brcm_openomci_onu':
+                self.log.debug('using-brcm_openomci_onu')
+
+                # tcont creation (onu)
+                tcont = TcontsConfigData()
+                tcont.alloc_id = self.platform.mk_alloc_id(
+                    onu_indication.intf_id, onu_indication.onu_id)
+
+                # gem port creation
+                gem_port = GemportsConfigData()
+                gem_port.gemport_id = self.platform.mk_gemport_id(
+                    onu_indication.intf_id,
+                    onu_indication.onu_id)
+                gem_port.tcont_ref = str(tcont.alloc_id)
+
+                self.log.info('inject-tcont-gem-data-onu-handler',
+                              onu_indication=onu_indication, tcont=tcont,
+                              gem_port=gem_port)
+
+                onu_adapter_agent.create_tcont(onu_device, tcont,
+                                               traffic_descriptor_data=None)
+                onu_adapter_agent.create_gemport(onu_device, gem_port)
+                onu_adapter_agent.create_interface(onu_device, onu_indication)
+
             else:
                 self.log.error('unsupported-openolt-onu-adapter')
 
@@ -297,6 +545,385 @@
             self.log.warn('Not-implemented-or-invalid-value-of-oper-state',
                           oper_state=onu_indication.oper_state)
 
+    def onu_ports_down(self, onu_device, uni_no, uni_name, oper_state):
+        # Set port oper state to Discovered
+        # add port will update port if it exists
+        self.adapter_agent.add_port(
+            self.device_id,
+            Port(
+                port_no=uni_no,
+                label=uni_name,
+                type=Port.ETHERNET_UNI,
+                admin_state=onu_device.admin_state,
+                oper_status=oper_state))
+
+        # Disable logical port
+        onu_ports = self.proxy.get('devices/{}/ports'.format(onu_device.id))
+        onu_port_id = None
+        for onu_port in onu_ports:
+            if onu_port.port_no == uni_no:
+                onu_port_id = onu_port.label
+        if onu_port_id is None:
+            self.log.error('matching-onu-port-label-not-found',
+                           onu_id=onu_device.id, olt_id=self.device_id,
+                           onu_ports=onu_ports)
+            return
+        try:
+            onu_logical_port = self.adapter_agent.get_logical_port(
+                logical_device_id=self.logical_device_id, port_id=onu_port_id)
+            onu_logical_port.ofp_port.state = OFPPS_LINK_DOWN
+            self.adapter_agent.update_logical_port(
+                logical_device_id=self.logical_device_id,
+                port=onu_logical_port)
+            self.log.debug('cascading-oper-state-to-port-and-logical-port')
+        except KeyError as e:
+            self.log.error('matching-onu-port-label-invalid',
+                           onu_id=onu_device.id, olt_id=self.device_id,
+                           onu_ports=onu_ports, onu_port_id=onu_port_id,
+                           error=e)
+
+    def omci_indication(self, omci_indication):
+
+        self.log.debug("omci indication", intf_id=omci_indication.intf_id,
+                       onu_id=omci_indication.onu_id)
+
+        onu_device = self.adapter_agent.get_child_device(
+            self.device_id, onu_id=omci_indication.onu_id,
+            parent_port_no=self.platform.intf_id_to_port_no(
+                            omci_indication.intf_id, Port.PON_OLT),)
+
+        self.adapter_agent.receive_proxied_message(onu_device.proxy_address,
+                                                   omci_indication.pkt)
+
+    def packet_indication(self, pkt_indication):
+
+        self.log.debug("packet indication", intf_id=pkt_indication.intf_id,
+                       gemport_id=pkt_indication.gemport_id,
+                       flow_id=pkt_indication.flow_id)
+
+        onu_id = self.platform.onu_id_from_gemport_id(pkt_indication.gemport_id)
+        logical_port_num = self.platform.mk_uni_port_num(pkt_indication.intf_id,
+                                                    onu_id)
+
+        pkt = Ether(pkt_indication.pkt)
+        kw = dict(logical_device_id=self.logical_device_id,
+                  logical_port_no=logical_port_num)
+        self.adapter_agent.send_packet_in(packet=str(pkt), **kw)
+
+    def packet_out(self, egress_port, msg):
+        pkt = Ether(msg)
+        self.log.info('packet out', egress_port=egress_port,
+                      packet=str(pkt).encode("HEX"))
+
+        # Find port type
+        egress_port_type = self.port_type(egress_port)
+
+        if egress_port_type == Port.ETHERNET_UNI:
+
+            if pkt.haslayer(Dot1Q):
+                outer_shim = pkt.getlayer(Dot1Q)
+                if isinstance(outer_shim.payload, Dot1Q):
+                    # If double tag, remove the outer tag
+                    payload = (
+                        Ether(src=pkt.src, dst=pkt.dst, type=outer_shim.type) /
+                        outer_shim.payload
+                    )
+                else:
+                    payload = pkt
+            else:
+                payload = pkt
+
+            send_pkt = binascii.unhexlify(str(payload).encode("HEX"))
+
+            self.log.debug(
+                'sending-packet-to-ONU', egress_port=egress_port,
+                intf_id=self.platform.intf_id_from_uni_port_num(egress_port),
+                onu_id=self.platform.onu_id_from_port_num(egress_port),
+                packet=str(payload).encode("HEX"))
+
+            onu_pkt = openolt_pb2.OnuPacket(
+                intf_id=self.platform.intf_id_from_uni_port_num(egress_port),
+                onu_id=self.platform.onu_id_from_port_num(egress_port),
+                pkt=send_pkt)
+
+            self.stub.OnuPacketOut(onu_pkt)
+
+        elif egress_port_type == Port.ETHERNET_NNI:
+            self.log.debug('sending-packet-to-uplink', egress_port=egress_port,
+                           packet=str(pkt).encode("HEX"))
+
+            send_pkt = binascii.unhexlify(str(pkt).encode("HEX"))
+
+            uplink_pkt = openolt_pb2.UplinkPacket(
+                intf_id=self.platform.intf_id_from_nni_port_num(egress_port),
+                pkt=send_pkt)
+
+            self.stub.UplinkPacketOut(uplink_pkt)
+
+        else:
+            self.log.warn('Packet-out-to-this-interface-type-not-implemented',
+                          egress_port=egress_port,
+                          port_type=egress_port_type)
+
+    def send_proxied_message(self, proxy_address, msg):
+        onu_device = self.adapter_agent.get_child_device(
+            self.device_id,
+            onu_id=proxy_address.onu_id,
+            parent_port_no=self.platform.intf_id_to_port_no(
+                proxy_address.channel_id, Port.PON_OLT))
+        if onu_device.connect_status != ConnectStatus.REACHABLE:
+            self.log.debug('ONU is not reachable, cannot send OMCI',
+                           serial_number=onu_device.serial_number,
+                           intf_id=onu_device.proxy_address.channel_id,
+                           onu_id=onu_device.proxy_address.onu_id)
+            return
+        omci = openolt_pb2.OmciMsg(intf_id=proxy_address.channel_id,
+                                   onu_id=proxy_address.onu_id, pkt=str(msg))
+        self.stub.OmciMsgOut(omci)
+
+    def add_onu_device(self, intf_id, port_no, onu_id, serial_number):
+        self.log.info("Adding ONU", port_no=port_no, onu_id=onu_id,
+                      serial_number=serial_number)
+
+        # NOTE - channel_id of onu is set to intf_id
+        proxy_address = Device.ProxyAddress(device_id=self.device_id,
+                                            channel_id=intf_id, onu_id=onu_id,
+                                            onu_session_id=onu_id)
+
+        self.log.debug("Adding ONU", proxy_address=proxy_address)
+
+        serial_number_str = self.stringify_serial_number(serial_number)
+
+        self.adapter_agent.add_onu_device(
+            parent_device_id=self.device_id, parent_port_no=port_no,
+            vendor_id=serial_number.vendor_id, proxy_address=proxy_address,
+            root=True, serial_number=serial_number_str,
+            admin_state=AdminState.ENABLED)
+
+    def port_name(self, port_no, port_type, intf_id=None, serial_number=None):
+        if port_type is Port.ETHERNET_NNI:
+            return "nni-" + str(port_no)
+        elif port_type is Port.PON_OLT:
+            return "pon" + str(intf_id)
+        elif port_type is Port.ETHERNET_UNI:
+            if serial_number is not None:
+                return serial_number
+            else:
+                return "uni-{}".format(port_no)
+
+    def port_type(self, port_no):
+        ports = self.adapter_agent.get_ports(self.device_id)
+        for port in ports:
+            if port.port_no == port_no:
+                return port.type
+        return None
+
+    def add_logical_port(self, port_no, intf_id, oper_state):
+        self.log.info('adding-logical-port', port_no=port_no)
+
+        label = self.port_name(port_no, Port.ETHERNET_NNI)
+
+        cap = OFPPF_1GB_FD | OFPPF_FIBER
+        curr_speed = OFPPF_1GB_FD
+        max_speed = OFPPF_1GB_FD
+
+        if oper_state == OperStatus.ACTIVE:
+            of_oper_state = OFPPS_LIVE
+        else:
+            of_oper_state = OFPPS_LINK_DOWN
+
+        ofp = ofp_port(
+            port_no=port_no,
+            hw_addr=mac_str_to_tuple(self._get_mac_form_port_no(port_no)),
+            name=label, config=0, state=of_oper_state, curr=cap,
+            advertised=cap, peer=cap, curr_speed=curr_speed,
+            max_speed=max_speed)
+
+        ofp_stats = ofp_port_stats(port_no=port_no)
+
+        logical_port = LogicalPort(
+            id=label, ofp_port=ofp, device_id=self.device_id,
+            device_port_no=port_no, root_port=True,
+            ofp_port_stats=ofp_stats)
+
+        self.adapter_agent.add_logical_port(self.logical_device_id,
+                                            logical_port)
+
+    def _get_mac_form_port_no(self, port_no):
+        mac = ''
+        for i in range(4):
+            mac = ':%02x' % ((port_no >> (i * 8)) & 0xff) + mac
+        return '00:00' + mac
+
+
+    def add_port(self, intf_id, port_type, oper_status):
+        port_no = self.platform.intf_id_to_port_no(intf_id, port_type)
+
+        label = self.port_name(port_no, port_type, intf_id)
+
+        self.log.debug('adding-port', port_no=port_no, label=label,
+                       port_type=port_type)
+
+        port = Port(port_no=port_no, label=label, type=port_type,
+                    admin_state=AdminState.ENABLED, oper_status=oper_status)
+
+        self.adapter_agent.add_port(self.device_id, port)
+
+        return port_no, label
+
+    def delete_logical_port(self, child_device_id):
+        logical_ports = self.proxy.get('/logical_devices/{}/ports'.format(
+            self.logical_device_id))
+        for logical_port in logical_ports:
+            if logical_port.device_id == child_device_id:
+                self.log.debug('delete-logical-port',
+                               onu_device_id=child_device_id,
+                               logical_port=logical_port)
+                self.adapter_agent.delete_logical_port(
+                    self.logical_device_id, logical_port)
+                return
+
+    def delete_port(self, child_serial_number):
+        ports = self.proxy.get('/devices/{}/ports'.format(
+            self.device_id))
+        for port in ports:
+            if port.label == child_serial_number:
+                self.log.debug('delete-port',
+                               onu_serial_number=child_serial_number,
+                               port=port)
+                self.adapter_agent.delete_port(self.device_id, port)
+                return
+
+    def new_onu_id(self, intf_id):
+        onu_devices = self.adapter_agent.get_child_devices(self.device_id)
+        pon_onu_ids = [onu_device.proxy_address.onu_id
+                       for onu_device in onu_devices
+                       if onu_device.proxy_address.channel_id == intf_id]
+        for i in range(1, self.platform.MAX_ONUS_PER_PON):
+            if i not in pon_onu_ids:
+                return i
+
+        self.log.error('All available onu_ids taken on this pon',
+                       intf_id=intf_id, ids_taken=self.platform.MAX_ONUS_PER_PON)
+        return None
+
+    def update_flow_table(self, flows):
+        self.log.debug('No updates here now, all is done in logical flows '
+                       'update')
+
+    def update_logical_flows(self, flows_to_add, flows_to_remove,
+                             device_rules_map):
+        if not self.is_state_up():
+            self.log.info('The OLT is not up, we cannot update flows',
+                          flows_to_add=[f.id for f in flows_to_add],
+                          flows_to_remove=[f.id for f in flows_to_remove])
+            return
+
+        try:
+            self.flow_mgr.update_children_flows(device_rules_map)
+        except Exception as e:
+            self.log.error('Error updating children flows', error=e)
+
+        self.log.debug('logical flows update', flows_to_add=flows_to_add,
+                       flows_to_remove=flows_to_remove)
+
+        for flow in flows_to_add:
+
+            try:
+                self.flow_mgr.add_flow(flow)
+            except grpc.RpcError as grpc_e:
+                if grpc_e.code() == grpc.StatusCode.ALREADY_EXISTS:
+                    self.log.warn('flow already exists', e=grpc_e, flow=flow)
+                else:
+                    self.log.error('failed to add flow', flow=flow,
+                                   grpc_error=grpc_e)
+            except Exception as e:
+                self.log.error('failed to add flow', flow=flow, e=e)
+
+        for flow in flows_to_remove:
+
+            try:
+                self.flow_mgr.remove_flow(flow)
+            except Exception as e:
+                self.log.error('failed to remove flow', flow=flow, e=e)
+
+        self.flow_mgr.repush_all_different_flows()
+
+    # There has to be a better way to do this
+    def ip_hex(self, ip):
+        octets = ip.split(".")
+        hex_ip = []
+        for octet in octets:
+            octet_hex = hex(int(octet))
+            octet_hex = octet_hex.split('0x')[1]
+            octet_hex = octet_hex.rjust(2, '0')
+            hex_ip.append(octet_hex)
+        return ":".join(hex_ip)
+
+    def stringify_vendor_specific(self, vendor_specific):
+        return ''.join(str(i) for i in [
+            hex(ord(vendor_specific[0]) >> 4 & 0x0f)[2:],
+            hex(ord(vendor_specific[0]) & 0x0f)[2:],
+            hex(ord(vendor_specific[1]) >> 4 & 0x0f)[2:],
+            hex(ord(vendor_specific[1]) & 0x0f)[2:],
+            hex(ord(vendor_specific[2]) >> 4 & 0x0f)[2:],
+            hex(ord(vendor_specific[2]) & 0x0f)[2:],
+            hex(ord(vendor_specific[3]) >> 4 & 0x0f)[2:],
+            hex(ord(vendor_specific[3]) & 0x0f)[2:]])
+
+    def stringify_serial_number(self, serial_number):
+        return ''.join([serial_number.vendor_id,
+                        self.stringify_vendor_specific(
+                            serial_number.vendor_specific)])
+
+    def destringify_serial_number(self, serial_number_str):
+        serial_number = openolt_pb2.SerialNumber(
+            vendor_id=serial_number_str[:4].encode('utf-8'),
+            vendor_specific=binascii.unhexlify(serial_number_str[4:]))
+        return serial_number
+
+    def disable(self):
+        self.log.debug('sending-deactivate-olt-message',
+                       device_id=self.device_id)
+
+        try:
+            # Send grpc call
+            self.stub.DisableOlt(openolt_pb2.Empty())
+            # The resulting indication will bring the OLT down
+            # self.go_state_down()
+            self.log.info('openolt device disabled')
+        except Exception as e:
+            self.log.error('Failure to disable openolt device', error=e)
+
+    def delete(self):
+        self.log.info('deleting-olt', device_id=self.device_id,
+                      logical_device_id=self.logical_device_id)
+
+        try:
+            # Rebooting to reset the state
+            self.reboot()
+            # Removing logical device
+            ld = self.adapter_agent.get_logical_device(self.logical_device_id)
+            self.adapter_agent.delete_logical_device(ld)
+        except Exception as e:
+            self.log.error('Failure to delete openolt device', error=e)
+            raise e
+        else:
+            self.log.info('successfully-deleted-olt', device_id=self.device_id)
+
+    def reenable(self):
+        self.log.debug('reenabling-olt', device_id=self.device_id)
+
+        try:
+            self.stub.ReenableOlt(openolt_pb2.Empty())
+
+            self.log.info('enabling-all-ports', device_id=self.device_id)
+            self.adapter_agent.enable_all_ports(self.device_id)
+        except Exception as e:
+            self.log.error('Failure to reenable openolt device', error=e)
+        else:
+            self.log.info('openolt device reenabled')
+
     def activate_onu(self, intf_id, onu_id, serial_number,
                      serial_number_str):
         pir = self.bw_mgr.pir(serial_number_str)
@@ -306,4 +933,52 @@
         onu = openolt_pb2.Onu(intf_id=intf_id, onu_id=onu_id,
                               serial_number=serial_number, pir=pir)
         self.stub.ActivateOnu(onu)
-        self.log.info('onu-activated', serial_number=serial_number_str)
\ No newline at end of file
+        self.log.info('onu-activated', serial_number=serial_number_str)
+
+    def delete_child_device(self, child_device):
+        self.log.debug('sending-deactivate-onu',
+                       olt_device_id=self.device_id,
+                       onu_device=child_device,
+                       onu_serial_number=child_device.serial_number)
+        try:
+            self.adapter_agent.delete_child_device(self.device_id,
+                                                   child_device.id,
+                                                   child_device)
+        except Exception as e:
+            self.log.error('adapter_agent error', error=e)
+        try:
+            self.delete_logical_port(child_device.id)
+        except Exception as e:
+            self.log.error('logical_port delete error', error=e)
+        try:
+            self.delete_port(child_device.serial_number)
+        except Exception as e:
+            self.log.error('port delete error', error=e)
+        serial_number = self.destringify_serial_number(
+            child_device.serial_number)
+        onu = openolt_pb2.Onu(intf_id=child_device.proxy_address.channel_id,
+                              onu_id=child_device.proxy_address.onu_id,
+                              serial_number=serial_number)
+        self.stub.DeleteOnu(onu)
+
+    def reboot(self):
+        self.log.debug('rebooting BBSimOLT device', device_id=self.device_id)
+        try:
+            self.stub.Reboot(openolt_pb2.Empty())
+        except Exception as e:
+            self.log.error('failed to reboot device', error=e)
+        else:
+            self.log.info('device rebooted')
+
+    def trigger_statistics_collection(self):
+        try:
+            self.stub.CollectStatistics(openolt_pb2.Empty())
+        except Exception as e:
+            self.log.error('Error while triggering statistics collection',
+                           error=e)
+        else:
+            self.log.info('statistics requested')
+
+    def simulate_alarm(self, alarm):
+        self.alarm_mgr.simulate_alarm(alarm)
+
diff --git a/voltha/adapters/bbsimolt/bbsimolt_flow_mgr.py b/voltha/adapters/bbsimolt/bbsimolt_flow_mgr.py
new file mode 100644
index 0000000..1494a01
--- /dev/null
+++ b/voltha/adapters/bbsimolt/bbsimolt_flow_mgr.py
@@ -0,0 +1,631 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import copy
+from twisted.internet import reactor
+import grpc
+
+from voltha.protos.openflow_13_pb2 import OFPXMC_OPENFLOW_BASIC, \
+    ofp_flow_stats, OFPMT_OXM, Flows, FlowGroups, OFPXMT_OFB_IN_PORT, \
+    OFPXMT_OFB_VLAN_VID
+from voltha.protos.device_pb2 import Port
+import voltha.core.flow_decomposer as fd
+from voltha.adapters.bbsimolt.bbsimolt_platform import BBSimOltPlatform
+from voltha.adapters.bbsimolt.protos import openolt_pb2
+from voltha.registry import registry
+
+HSIA_FLOW_INDEX = 0  # FIXME
+DHCP_FLOW_INDEX = 1  # FIXME
+DHCP_DOWNLINK_FLOW_INDEX = 6  # FIXME
+EAPOL_FLOW_INDEX = 2  # FIXME
+EAPOL_DOWNLINK_FLOW_INDEX = 3  # FIXME
+EAPOL_DOWNLINK_SECONDARY_FLOW_INDEX = 4  # FIXME
+EAPOL_UPLINK_SECONDARY_FLOW_INDEX = 5  # FIXME
+LLDP_FLOW_INDEX = 7  # FIXME
+
+EAP_ETH_TYPE = 0x888e
+LLDP_ETH_TYPE = 0x88cc
+
+# FIXME - see also BRDCM_DEFAULT_VLAN in broadcom_onu.py
+DEFAULT_MGMT_VLAN = 4091
+
+
+class BBSimOltFlowMgr(object):
+
+    def __init__(self, log, stub, device_id, logical_device_id):
+        self.log = log
+        self.stub = stub
+        self.device_id = device_id
+        self.logical_device_id = logical_device_id
+        self.platform = BBSimOltPlatform(self.log)
+        self.logical_flows_proxy = registry('core').get_proxy(
+            '/logical_devices/{}/flows'.format(self.logical_device_id))
+        self.flows_proxy = registry('core').get_proxy(
+            '/devices/{}/flows'.format(self.device_id))
+        self.root_proxy = registry('core').get_proxy('/')
+
+    def add_flow(self, flow):
+        self.log.debug('add flow', flow=flow)
+        classifier_info = dict()
+        action_info = dict()
+
+        for field in fd.get_ofb_fields(flow):
+            if field.type == fd.ETH_TYPE:
+                classifier_info['eth_type'] = field.eth_type
+                self.log.debug('field-type-eth-type',
+                               eth_type=classifier_info['eth_type'])
+            elif field.type == fd.IP_PROTO:
+                classifier_info['ip_proto'] = field.ip_proto
+                self.log.debug('field-type-ip-proto',
+                               ip_proto=classifier_info['ip_proto'])
+            elif field.type == fd.IN_PORT:
+                classifier_info['in_port'] = field.port
+                self.log.debug('field-type-in-port',
+                               in_port=classifier_info['in_port'])
+            elif field.type == fd.VLAN_VID:
+                classifier_info['vlan_vid'] = field.vlan_vid & 0xfff
+                self.log.debug('field-type-vlan-vid',
+                               vlan=classifier_info['vlan_vid'])
+            elif field.type == fd.VLAN_PCP:
+                classifier_info['vlan_pcp'] = field.vlan_pcp
+                self.log.debug('field-type-vlan-pcp',
+                               pcp=classifier_info['vlan_pcp'])
+            elif field.type == fd.UDP_DST:
+                classifier_info['udp_dst'] = field.udp_dst
+                self.log.debug('field-type-udp-dst',
+                               udp_dst=classifier_info['udp_dst'])
+            elif field.type == fd.UDP_SRC:
+                classifier_info['udp_src'] = field.udp_src
+                self.log.debug('field-type-udp-src',
+                               udp_src=classifier_info['udp_src'])
+            elif field.type == fd.IPV4_DST:
+                classifier_info['ipv4_dst'] = field.ipv4_dst
+                self.log.debug('field-type-ipv4-dst',
+                               ipv4_dst=classifier_info['ipv4_dst'])
+            elif field.type == fd.IPV4_SRC:
+                classifier_info['ipv4_src'] = field.ipv4_src
+                self.log.debug('field-type-ipv4-src',
+                               ipv4_src=classifier_info['ipv4_src'])
+            elif field.type == fd.METADATA:
+                classifier_info['metadata'] = field.table_metadata
+                self.log.debug('field-type-metadata',
+                               metadata=classifier_info['metadata'])
+            else:
+                raise NotImplementedError('field.type={}'.format(
+                    field.type))
+
+        for action in fd.get_actions(flow):
+            if action.type == fd.OUTPUT:
+                action_info['output'] = action.output.port
+                self.log.debug('action-type-output',
+                               output=action_info['output'],
+                               in_port=classifier_info['in_port'])
+            elif action.type == fd.POP_VLAN:
+                if fd.get_goto_table_id(flow) is None:
+                    self.log.debug('being taken care of by ONU', flow=flow)
+                    return
+                action_info['pop_vlan'] = True
+                self.log.debug('action-type-pop-vlan',
+                               in_port=classifier_info['in_port'])
+            elif action.type == fd.PUSH_VLAN:
+                action_info['push_vlan'] = True
+                action_info['tpid'] = action.push.ethertype
+                self.log.debug('action-type-push-vlan',
+                               push_tpid=action_info['tpid'],
+                               in_port=classifier_info['in_port'])
+                if action.push.ethertype != 0x8100:
+                    self.log.error('unhandled-tpid',
+                                   ethertype=action.push.ethertype)
+            elif action.type == fd.SET_FIELD:
+                # action_info['action_type'] = 'set_field'
+                _field = action.set_field.field.ofb_field
+                assert (action.set_field.field.oxm_class ==
+                        OFPXMC_OPENFLOW_BASIC)
+                self.log.debug('action-type-set-field', field=_field,
+                               in_port=classifier_info['in_port'])
+                if _field.type == fd.VLAN_VID:
+                    self.log.debug('set-field-type-vlan-vid',
+                                   vlan_vid=_field.vlan_vid & 0xfff)
+                    action_info['vlan_vid'] = (_field.vlan_vid & 0xfff)
+                else:
+                    self.log.error('unsupported-action-set-field-type',
+                                   field_type=_field.type)
+            else:
+                self.log.error('unsupported-action-type',
+                               action_type=action.type,
+                               in_port=classifier_info['in_port'])
+
+        if fd.get_goto_table_id(flow) is not None and \
+                'pop_vlan' not in action_info:
+            self.log.debug('being taken care of by ONU', flow=flow)
+            return
+
+        if 'output' not in action_info and 'metadata' in classifier_info:
+            # find flow in the next table
+            next_flow = self.find_next_flow(flow)
+            if next_flow is None:
+                return
+            action_info['output'] = fd.get_out_port(next_flow)
+            for field in fd.get_ofb_fields(next_flow):
+                if field.type == fd.VLAN_VID:
+                    classifier_info['metadata'] = field.vlan_vid & 0xfff
+
+        (intf_id, onu_id) = self.platform.extract_access_from_flow(
+            classifier_info['in_port'], action_info['output'])
+
+        self.divide_and_add_flow(intf_id, onu_id, classifier_info,
+                                 action_info, flow)
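+        # Note: from here the flow is dispatched by divide_and_add_flow()
+        # to one of the add_* helpers (DHCP trap, EAPOL, LLDP, HSIA); most
+        # of them push the resulting openolt_pb2.Flow through
+        # add_flow_to_device() and record it via register_flow(), while the
+        # LLDP trap is sent directly with stub.FlowAdd().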
+
+    def remove_flow(self, flow):
+        self.log.debug('trying to remove flows from logical flow :',
+                       logical_flow=flow)
+        device_flows_to_remove = []
+        device_flows = self.flows_proxy.get('/').items
+        for f in device_flows:
+            if f.cookie == flow.id:
+                device_flows_to_remove.append(f)
+
+        for f in device_flows_to_remove:
+            (id, direction) = self.decode_stored_id(f.id)
+            flow_to_remove = openolt_pb2.Flow(flow_id=id, flow_type=direction)
+            try:
+                self.stub.FlowRemove(flow_to_remove)
+            except grpc.RpcError as grpc_e:
+                if grpc_e.code() == grpc.StatusCode.NOT_FOUND:
+                    self.log.debug('This flow does not exist on the switch, '
+                                   'normal after an OLT reboot',
+                                   flow=flow_to_remove)
+                else:
+                    raise grpc_e
+
+            self.log.debug('flow removed from device', flow=f,
+                           flow_key=flow_to_remove)
+
+        if len(device_flows_to_remove) > 0:
+            new_flows = []
+            flows_ids_to_remove = [f.id for f in device_flows_to_remove]
+            for f in device_flows:
+                if f.id not in flows_ids_to_remove:
+                    new_flows.append(f)
+
+            self.flows_proxy.update('/', Flows(items=new_flows))
+            self.log.debug('flows removed from the data store',
+                           flow_ids_removed=flows_ids_to_remove,
+                           number_of_flows_removed=(len(device_flows) - len(
+                               new_flows)), expected_flows_removed=len(
+                            device_flows_to_remove))
+        else:
+            self.log.debug('no device flow to remove for this flow (normal '
+                           'for multi table flows)', flow=flow)
+
+    def divide_and_add_flow(self, intf_id, onu_id, classifier,
+                            action, flow):
+
+        self.log.debug('sorting flow', intf_id=intf_id, onu_id=onu_id,
+                       classifier=classifier, action=action)
+
+        if 'ip_proto' in classifier:
+            if classifier['ip_proto'] == 17:
+                self.log.debug('dhcp flow add')
+                self.add_dhcp_trap(intf_id, onu_id, classifier,
+                                   action, flow)
+            elif classifier['ip_proto'] == 2:
+                self.log.warn('igmp flow add ignored, not implemented yet')
+            else:
+                self.log.warn("Invalid-Classifier-to-handle",
+                              classifier=classifier,
+                              action=action)
+        elif 'eth_type' in classifier:
+            if classifier['eth_type'] == EAP_ETH_TYPE:
+                self.log.debug('eapol flow add')
+                self.add_eapol_flow(intf_id, onu_id, flow)
+                vlan_id = self.get_subscriber_vlan(fd.get_in_port(flow))
+                if vlan_id is not None:
+                    self.add_eapol_flow(
+                        intf_id, onu_id, flow,
+                        uplink_eapol_id=EAPOL_UPLINK_SECONDARY_FLOW_INDEX,
+                        downlink_eapol_id=EAPOL_DOWNLINK_SECONDARY_FLOW_INDEX,
+                        vlan_id=vlan_id)
+            elif classifier['eth_type'] == LLDP_ETH_TYPE:
+                self.log.debug('lldp flow add')
+                self.add_lldp_flow(intf_id, onu_id, flow, classifier,
+                                   action)
+
+        elif 'push_vlan' in action:
+            self.add_upstream_data_flow(intf_id, onu_id, classifier, action,
+                                        flow)
+        elif 'pop_vlan' in action:
+            self.add_downstream_data_flow(intf_id, onu_id, classifier,
+                                          action, flow)
+        else:
+            self.log.debug('Invalid-flow-type-to-handle',
+                           classifier=classifier,
+                           action=action, flow=flow)
+
+    def add_upstream_data_flow(self, intf_id, onu_id, uplink_classifier,
+                               uplink_action, logical_flow):
+
+        uplink_classifier['pkt_tag_type'] = 'single_tag'
+
+        self.add_hsia_flow(intf_id, onu_id, uplink_classifier,
+                           uplink_action, 'upstream', HSIA_FLOW_INDEX,
+                           logical_flow)
+
+        # Secondary EAP on the subscriber vlan
+        (eap_active, eap_logical_flow) = self.is_eap_enabled(intf_id, onu_id)
+        if eap_active:
+            self.add_eapol_flow(
+                intf_id, onu_id, eap_logical_flow,
+                uplink_eapol_id=EAPOL_UPLINK_SECONDARY_FLOW_INDEX,
+                downlink_eapol_id=EAPOL_DOWNLINK_SECONDARY_FLOW_INDEX,
+                vlan_id=uplink_classifier['vlan_vid'])
+
+    def add_downstream_data_flow(self, intf_id, onu_id, downlink_classifier,
+                                 downlink_action, flow):
+        downlink_classifier['pkt_tag_type'] = 'double_tag'
+        # FIXME: is this needed? pop_vlan should already be set in the action.
+        downlink_action['pop_vlan'] = True
+        downlink_action['vlan_vid'] = downlink_classifier['vlan_vid']
+
+        self.add_hsia_flow(intf_id, onu_id, downlink_classifier,
+                           downlink_action, 'downstream', HSIA_FLOW_INDEX,
+                           flow)
+
+    # TODO: right now only one GEM port is supported, so the method below
+    # takes care of handling all the p-bits.
+    # We need to revisit this when multiple GEM ports per p-bit are needed.
+    # Waiting for Technology Profile support.
+    def add_hsia_flow(self, intf_id, onu_id, classifier, action,
+                      direction, hsia_id, logical_flow):
+
+        gemport_id = self.platform.mk_gemport_id(intf_id, onu_id)
+        flow_id = self.platform.mk_flow_id(intf_id, onu_id, hsia_id)
+
+        flow = openolt_pb2.Flow(
+                onu_id=onu_id, flow_id=flow_id, flow_type=direction,
+                access_intf_id=intf_id, gemport_id=gemport_id,
+                priority=logical_flow.priority,
+                classifier=self.mk_classifier(classifier),
+                action=self.mk_action(action))
+
+        self.add_flow_to_device(flow, logical_flow)
+
+    def add_dhcp_trap(self, intf_id, onu_id, classifier, action, logical_flow):
+
+        self.log.debug('add dhcp upstream trap', classifier=classifier,
+                       action=action)
+
+        action.clear()
+        action['trap_to_host'] = True
+        classifier['pkt_tag_type'] = 'single_tag'
+        classifier.pop('vlan_vid', None)
+
+        gemport_id = self.platform.mk_gemport_id(intf_id, onu_id)
+        flow_id = self.platform.mk_flow_id(intf_id, onu_id, DHCP_FLOW_INDEX)
+
+        upstream_flow = openolt_pb2.Flow(
+            onu_id=onu_id, flow_id=flow_id, flow_type="upstream",
+            access_intf_id=intf_id, gemport_id=gemport_id,
+            priority=logical_flow.priority,
+            classifier=self.mk_classifier(classifier),
+            action=self.mk_action(action))
+
+        self.add_flow_to_device(upstream_flow, logical_flow)
+
+        # FIXME - ONOS should send explicit upstream and downstream
+        #         exact dhcp trap flow.
+
+        downstream_logical_flow = copy.deepcopy(logical_flow)
+        for oxm_field in downstream_logical_flow.match.oxm_fields:
+            if oxm_field.ofb_field.type == OFPXMT_OFB_IN_PORT:
+                oxm_field.ofb_field.port = \
+                    self.platform.intf_id_to_port_no(0, Port.ETHERNET_NNI)
+
+        classifier['udp_src'] = 67
+        classifier['udp_dst'] = 68
+        classifier['pkt_tag_type'] = 'double_tag'
+        action.pop('push_vlan', None)
+
+        flow_id = self.platform.mk_flow_id(intf_id, onu_id,
+                                           DHCP_DOWNLINK_FLOW_INDEX)
+
+        downstream_flow = openolt_pb2.Flow(
+            onu_id=onu_id, flow_id=flow_id, flow_type="downstream",
+            access_intf_id=intf_id, network_intf_id=0, gemport_id=gemport_id,
+            priority=logical_flow.priority, classifier=self.mk_classifier(
+                classifier),
+            action=self.mk_action(action))
+
+        self.add_flow_to_device(downstream_flow, downstream_logical_flow)
+
+    def add_eapol_flow(self, intf_id, onu_id, logical_flow,
+                       uplink_eapol_id=EAPOL_FLOW_INDEX,
+                       downlink_eapol_id=EAPOL_DOWNLINK_FLOW_INDEX,
+                       vlan_id=DEFAULT_MGMT_VLAN):
+
+        uplink_classifier = {}
+        uplink_classifier['eth_type'] = EAP_ETH_TYPE
+        uplink_classifier['pkt_tag_type'] = 'single_tag'
+        uplink_classifier['vlan_vid'] = vlan_id
+
+        uplink_action = {}
+        uplink_action['trap_to_host'] = True
+
+        gemport_id = self.platform.mk_gemport_id(intf_id, onu_id)
+
+        # Add Upstream EAPOL Flow.
+
+        uplink_flow_id = self.platform.mk_flow_id(intf_id, onu_id,
+                                                  uplink_eapol_id)
+
+        upstream_flow = openolt_pb2.Flow(
+            onu_id=onu_id, flow_id=uplink_flow_id, flow_type="upstream",
+            access_intf_id=intf_id, gemport_id=gemport_id,
+            priority=logical_flow.priority,
+            classifier=self.mk_classifier(uplink_classifier),
+            action=self.mk_action(uplink_action))
+
+        logical_flow = copy.deepcopy(logical_flow)
+        logical_flow.match.oxm_fields.extend(fd.mk_oxm_fields([fd.vlan_vid(
+            vlan_id | 0x1000)]))
+        logical_flow.match.type = OFPMT_OXM
+
+        self.add_flow_to_device(upstream_flow, logical_flow)
+
+        if vlan_id == DEFAULT_MGMT_VLAN:
+
+            # Add Downstream EAPOL Flow, Only for first EAP flow
+
+            downlink_classifier = {}
+            downlink_classifier['pkt_tag_type'] = 'single_tag'
+            downlink_classifier['vlan_vid'] = 4000 - onu_id
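+            # Note: each ONU gets its own downstream EAPOL VLAN, derived here
+            # as 4000 - onu_id; the downstream logical flow built below
+            # matches the same (4000 - onu_id) VID so the two stay in sync.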
+
+            downlink_action = {}
+            downlink_action['push_vlan'] = True
+            downlink_action['vlan_vid'] = vlan_id
+
+            downlink_flow_id = self.platform.mk_flow_id(intf_id, onu_id,
+                                                        downlink_eapol_id)
+
+            downstream_flow = openolt_pb2.Flow(
+                onu_id=onu_id, flow_id=downlink_flow_id, flow_type="downstream",
+                access_intf_id=intf_id, gemport_id=gemport_id,
+                priority=logical_flow.priority,
+                classifier=self.mk_classifier(downlink_classifier),
+                action=self.mk_action(downlink_action))
+
+            downstream_logical_flow = ofp_flow_stats(id=logical_flow.id,
+                 cookie=logical_flow.cookie, table_id=logical_flow.table_id,
+                 priority=logical_flow.priority, flags=logical_flow.flags)
+
+            downstream_logical_flow.match.oxm_fields.extend(fd.mk_oxm_fields([
+                fd.in_port(fd.get_out_port(logical_flow)),
+                fd.vlan_vid((4000 - onu_id) | 0x1000)]))
+            downstream_logical_flow.match.type = OFPMT_OXM
+
+            downstream_logical_flow.instructions.extend(
+                fd.mk_instructions_from_actions([fd.output(
+                    self.platform.mk_uni_port_num(intf_id, onu_id))]))
+
+            self.add_flow_to_device(downstream_flow, downstream_logical_flow)
+
+    def repush_all_different_flows(self):
+        # Check if the device is supposed to have flows, if so add them
+        # Recover static flows after a reboot
+        logical_flows = self.logical_flows_proxy.get('/').items
+        devices_flows = self.flows_proxy.get('/').items
+        logical_flows_ids_provisioned = [f.cookie for f in devices_flows]
+        for logical_flow in logical_flows:
+            try:
+                if logical_flow.id not in logical_flows_ids_provisioned:
+                    self.add_flow(logical_flow)
+            except Exception as e:
+                self.log.debug('Problem re-adding this flow', error=e)
+
+    def reset_flows(self):
+        self.flows_proxy.update('/', Flows())
+
+    def add_lldp_flow(self, intf_id, onu_id, logical_flow, classifier, action):
+
+        self.log.debug('add lldp downstream trap', classifier=classifier,
+                       action=action)
+
+        action.clear()
+        action['trap_to_host'] = True
+        classifier['pkt_tag_type'] = 'untagged'
+
+        gemport_id = self.platform.mk_gemport_id(intf_id, onu_id)
+        flow_id = self.platform.mk_flow_id(intf_id, onu_id, LLDP_FLOW_INDEX)
+
+        downstream_flow = openolt_pb2.Flow(
+            onu_id=onu_id, flow_id=flow_id, flow_type="downstream",
+            access_intf_id=3, network_intf_id=0, gemport_id=gemport_id,
+            priority=logical_flow.priority,
+            classifier=self.mk_classifier(classifier),
+            action=self.mk_action(action))
+
+        self.log.debug('add lldp downstream trap', access_intf_id=intf_id,
+                       onu_id=onu_id, flow_id=flow_id)
+        self.stub.FlowAdd(downstream_flow)
+
+    def mk_classifier(self, classifier_info):
+
+        classifier = openolt_pb2.Classifier()
+
+        if 'eth_type' in classifier_info:
+            classifier.eth_type = classifier_info['eth_type']
+        if 'ip_proto' in classifier_info:
+            classifier.ip_proto = classifier_info['ip_proto']
+        if 'vlan_vid' in classifier_info:
+            classifier.o_vid = classifier_info['vlan_vid']
+        if 'metadata' in classifier_info:
+            classifier.i_vid = classifier_info['metadata']
+        if 'vlan_pcp' in classifier_info:
+            classifier.o_pbits = classifier_info['vlan_pcp']
+        if 'udp_src' in classifier_info:
+            classifier.src_port = classifier_info['udp_src']
+        if 'udp_dst' in classifier_info:
+            classifier.dst_port = classifier_info['udp_dst']
+        if 'ipv4_dst' in classifier_info:
+            classifier.dst_ip = classifier_info['ipv4_dst']
+        if 'ipv4_src' in classifier_info:
+            classifier.src_ip = classifier_info['ipv4_src']
+        if 'pkt_tag_type' in classifier_info:
+            if classifier_info['pkt_tag_type'] == 'single_tag':
+                classifier.pkt_tag_type = 'single_tag'
+            elif classifier_info['pkt_tag_type'] == 'double_tag':
+                classifier.pkt_tag_type = 'double_tag'
+            elif classifier_info['pkt_tag_type'] == 'untagged':
+                classifier.pkt_tag_type = 'untagged'
+            else:
+                classifier.pkt_tag_type = 'none'
+
+        return classifier
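+        # Illustrative mapping (assumed values): a classifier_info dict such
+        # as {'eth_type': 0x888e, 'vlan_vid': 20, 'pkt_tag_type': 'single_tag'}
+        # becomes an openolt_pb2.Classifier with eth_type=0x888e, o_vid=20
+        # and pkt_tag_type='single_tag'.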
+
+    def mk_action(self, action_info):
+        action = openolt_pb2.Action()
+
+        if 'pop_vlan' in action_info:
+            action.o_vid = action_info['vlan_vid']
+            action.cmd.remove_outer_tag = True
+        elif 'push_vlan' in action_info:
+            action.o_vid = action_info['vlan_vid']
+            action.cmd.add_outer_tag = True
+        elif 'trap_to_host' in action_info:
+            action.cmd.trap_to_host = True
+        else:
+            self.log.info('Invalid-action-field', action_info=action_info)
+            return
+        return action
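+        # Illustrative mapping (assumed values): {'push_vlan': True,
+        # 'vlan_vid': 20} yields an Action with o_vid=20 and
+        # cmd.add_outer_tag=True, while an unrecognized action_info logs
+        # 'Invalid-action-field' and returns None.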
+
+    def is_eap_enabled(self, intf_id, onu_id):
+        flows = self.logical_flows_proxy.get('/').items
+
+        for flow in flows:
+            eap_flow = False
+            eap_intf_id = None
+            eap_onu_id = None
+            for field in fd.get_ofb_fields(flow):
+                if field.type == fd.ETH_TYPE:
+                    if field.eth_type == EAP_ETH_TYPE:
+                        eap_flow = True
+                if field.type == fd.IN_PORT:
+                    eap_intf_id = self.platform.intf_id_from_uni_port_num(
+                        field.port)
+                    eap_onu_id = self.platform.onu_id_from_port_num(
+                        field.port)
+
+            if eap_flow:
+                self.log.debug('eap flow detected', onu_id=onu_id,
+                               intf_id=intf_id, eap_intf_id=eap_intf_id,
+                               eap_onu_id=eap_onu_id)
+            if eap_flow and intf_id == eap_intf_id and onu_id == eap_onu_id:
+                return (True, flow)
+
+        return (False, None)
+
+    def get_subscriber_vlan(self, port):
+        self.log.debug('looking for subscriber flow for port', port=port)
+
+        flows = self.logical_flows_proxy.get('/').items
+        for flow in flows:
+            in_port = fd.get_in_port(flow)
+            out_port = fd.get_out_port(flow)
+
+            if in_port == port and \
+                    self.platform.intf_id_to_port_type_name(out_port) == \
+                    Port.ETHERNET_NNI:
+                fields = fd.get_ofb_fields(flow)
+                self.log.debug('subscriber flow found', fields=fields)
+                for field in fields:
+                    if field.type == OFPXMT_OFB_VLAN_VID:
+                        self.log.debug('subscriber vlan found',
+                                       vlan_id=field.vlan_vid)
+                        return field.vlan_vid & 0x0fff
+        self.log.debug('No subscriber flow found', port=port)
+        return None
+
+    def add_flow_to_device(self, flow, logical_flow):
+        self.log.debug('pushing flow to device', flow=flow)
+        self.stub.FlowAdd(flow)
+        self.register_flow(logical_flow, flow)
+
+    def register_flow(self, logical_flow, device_flow):
+        self.log.debug('registering flow in device',
+                       logical_flow=logical_flow, device_flow=device_flow)
+        stored_flow = copy.deepcopy(logical_flow)
+        stored_flow.id = self.generate_stored_id(device_flow.flow_id,
+                                                 device_flow.flow_type)
+        self.log.debug('generated device flow id', id=stored_flow.id,
+                       flow_id=device_flow.flow_id,
+                       direction=device_flow.flow_type)
+        stored_flow.cookie = logical_flow.id
+        flows = self.flows_proxy.get('/')
+        flows.items.extend([stored_flow])
+        self.flows_proxy.update('/', flows)
+
+    def find_next_flow(self, flow):
+        table_id = fd.get_goto_table_id(flow)
+        metadata = 0
+        for field in fd.get_ofb_fields(flow):
+            if field.type == fd.METADATA:
+                metadata = field.table_metadata
+        if table_id is None:
+            return None
+        flows = self.logical_flows_proxy.get('/').items
+        next_flows = []
+        for f in flows:
+            if f.table_id == table_id:
+                # FIXME
+                if fd.get_in_port(f) == fd.get_in_port(flow) and \
+                        fd.get_out_port(f) == metadata:
+
+                    next_flows.append(f)
+
+        if len(next_flows) == 0:
+            self.log.warning('no next flow found, it may be a timing issue',
+                             flow=flow, number_of_flows=len(flows))
+            reactor.callLater(5, self.add_flow, flow)
+            return None
+
+        next_flows.sort(key=lambda f: f.priority, reverse=True)
+
+        return next_flows[0]
+
+    def update_children_flows(self, device_rules_map):
+
+        for device_id, (flows, groups) in device_rules_map.iteritems():
+            if device_id != self.device_id:
+                self.root_proxy.update('/devices/{}/flows'.format(device_id),
+                                       Flows(items=flows.values()))
+                self.root_proxy.update('/devices/{}/flow_groups'.format(
+                    device_id), FlowGroups(items=groups.values()))
+
+    def generate_stored_id(self, flow_id, direction):
+        if direction == 'upstream':
+            self.log.debug('upstream flow, shifting id')
+            return 0x1 << 15 | flow_id
+        elif direction == 'downstream':
+            self.log.debug('downstream flow, not shifting id')
+            return flow_id
+        else:
+            self.log.warn('Unrecognized direction', direction=direction)
+            return flow_id
+
+    def decode_stored_id(self, id):
+        if id >> 15 == 0x1:
+            return (id & 0x7fff, 'upstream')
+        else:
+            return (id, 'downstream')
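+        # Illustrative round trip (assumed flow id): generate_stored_id(42,
+        # 'upstream') == 0x802a and decode_stored_id(0x802a) returns
+        # (42, 'upstream'); downstream ids are stored unshifted, so
+        # decode_stored_id(42) returns (42, 'downstream').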
diff --git a/voltha/adapters/bbsimolt/bbsimolt_platform.py b/voltha/adapters/bbsimolt/bbsimolt_platform.py
index 5da100a..bb55cbf 100644
--- a/voltha/adapters/bbsimolt/bbsimolt_platform.py
+++ b/voltha/adapters/bbsimolt/bbsimolt_platform.py
@@ -17,25 +17,23 @@
 from voltha.protos.device_pb2 import Port
 import voltha.protos.device_pb2 as dev_pb2
 
-MAX_ONUS_PER_PON = 64
-
 class BBSimOltPlatform(object):
 
-    def __init__(self, log, device_info):
+    def __init__(self, log):
         self.log = log
-        self.device_info = device_info
+        self.MAX_ONUS_PER_PON = 64
 
     def mk_alloc_id(self, intf_id, onu_id, idx=0):
         # FIXME - driver should do prefixing 1 << 10 as it is Maple specific
         # return 1<<10 | onu_id<<6 | idx
-        return 1023 + intf_id * MAX_ONUS_PER_PON + onu_id  # FIXME
+        return 1023 + intf_id * self.MAX_ONUS_PER_PON + onu_id  # FIXME
 
 
     def mk_gemport_id(self, intf_id, onu_id, idx=0):
-        return 1024 + (((MAX_ONUS_PER_PON * intf_id + onu_id - 1) * 7) + idx)
+        return 1024 + (((self.MAX_ONUS_PER_PON * intf_id + onu_id - 1) * 7) + idx)
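+        # Illustrative example (assumed ids): with MAX_ONUS_PER_PON = 64,
+        # mk_gemport_id(intf_id=1, onu_id=2) = 1024 + ((64*1 + 2 - 1) * 7)
+        # = 1479, and onu_id_from_gemport_id(1479) recovers onu_id 2.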
 
     def onu_id_from_gemport_id(self, gemport_id):
-        return (((gemport_id - 1024) // 7) % MAX_ONUS_PER_PON) + 1
+        return (((gemport_id - 1024) // 7) % self.MAX_ONUS_PER_PON) + 1
 
     def mk_uni_port_num(self, intf_id, onu_id):
         return intf_id << 11 | onu_id << 4
@@ -103,4 +101,4 @@
         return False
 
     def max_onus_per_pon(self):
-        return MAX_ONUS_PER_PON
+        return self.MAX_ONUS_PER_PON
diff --git a/voltha/adapters/bbsimolt/bbsimolt_statistics.py b/voltha/adapters/bbsimolt/bbsimolt_statistics.py
new file mode 100644
index 0000000..ad4b9fa
--- /dev/null
+++ b/voltha/adapters/bbsimolt/bbsimolt_statistics.py
@@ -0,0 +1,608 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# from voltha.protos.events_pb2 import KpiEvent, MetricValuePairs
+# from voltha.protos.events_pb2 import KpiEventType
+# import voltha.adapters.openolt.openolt_platform as platform
+
+# from voltha.adapters.openolt.nni_port import NniPort
+# from voltha.adapters.openolt.pon_port import PonPort
+# from voltha.protos.device_pb2 import Port
+
+from twisted.internet import reactor, defer
+from voltha.extensions.kpi.olt.olt_pm_metrics import OltPmMetrics
+from voltha.protos.device_pb2 import PmConfig, PmConfigs, PmGroupConfig, Port
+from voltha.adapters.bbsimolt.bbsimolt_platform import BBSimOltPlatform
+
+
+class BBSimOltStatisticsMgr(object):
+    def __init__(self, bbsimolt_device, log, **kargs):
+
+        """
+        kargs are used to pass debugging flags at this time.
+        :param bbsimolt_device:
+        :param log:
+        :param kargs:
+        """
+        self.device = bbsimolt_device
+        self.log = log
+        self.platform = BBSimOltPlatform(log)
+        # Northbound and Southbound ports
+        # added to initialize the pm_metrics
+        self.northbound_ports = self.init_ports(type="nni")
+        self.southbound_ports = self.init_ports(type='pon')
+
+        self.pm_metrics = None
+        # The following can be used to allow a standalone test routine to start
+        # the metrics independently
+        self.metrics_init = kargs.pop("metrics_init", True)
+        if self.metrics_init:
+            self.init_pm_metrics()
+
+    def init_pm_metrics(self):
+        # Setup PM configuration for this device
+        if self.pm_metrics is None:
+            try:
+                self.device.reason = 'setting up Performance Monitoring configuration'
+                kwargs = {
+                    'nni-ports': self.northbound_ports.values(),
+                    'pon-ports': self.southbound_ports.values()
+                }
+                self.pm_metrics = OltPmMetrics(self.device.adapter_agent, self.device.device_id,
+                                               self.device.logical_device_id,
+                                               grouped=True, freq_override=False,
+                                               **kwargs)
+                """
+                    override the default naming structures in the OltPmMetrics class.
+                    This is being done until the protos can be modified in the BAL driver
+                    
+                """
+                self.pm_metrics.nni_pm_names = (self.get_openolt_port_pm_names())['nni_pm_names']
+                self.pm_metrics.nni_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                                 for (m, t) in self.pm_metrics.nni_pm_names}
+
+                self.pm_metrics.pon_pm_names = (self.get_openolt_port_pm_names())['pon_pm_names']
+                self.pm_metrics.pon_metrics_config = {m: PmConfig(name=m, type=t, enabled=True)
+                                                 for (m, t) in self.pm_metrics.pon_pm_names}
+                pm_config = self.pm_metrics.make_proto()
+                self.log.info("initial-pm-config", pm_config=pm_config)
+                self.device.adapter_agent.update_device_pm_config(pm_config, init=True)
+                # Start collecting stats from the device after a brief pause
+                reactor.callLater(10, self.pm_metrics.start_collector)
+            except Exception as e:
+                self.log.exception('pm-setup', e=e)
+
+    def port_statistics_indication(self, port_stats):
+        # self.log.info('port-stats-collected', stats=port_stats)
+        self.ports_statistics_kpis(port_stats)
+        #FIXME: etcd problem, do not update objects for now
+
+        #
+        #
+        # #FIXME : only the first uplink is a logical port
+        # if platform.intf_id_to_port_type_name(port_stats.intf_id) ==
+        #   Port.ETHERNET_NNI:
+        #     # ONOS update
+        #     self.update_logical_port_stats(port_stats)
+        # # update port object stats
+        # port = self.device.adapter_agent.get_port(self.device.device_id,
+        #     port_no=port_stats.intf_id)
+        #
+        # if port is None:
+        #     self.log.warn('port associated with this stats does not exist')
+        #     return
+        #
+        # port.rx_packets = port_stats.rx_packets
+        # port.rx_bytes = port_stats.rx_bytes
+        # port.rx_errors = port_stats.rx_error_packets
+        # port.tx_packets = port_stats.tx_packets
+        # port.tx_bytes = port_stats.tx_bytes
+        # port.tx_errors = port_stats.tx_error_packets
+        #
+        # # Add port does an update if port exists
+        # self.device.adapter_agent.add_port(self.device.device_id, port)
+
+    def flow_statistics_indication(self, flow_stats):
+        self.log.info('flow-stats-collected', stats=flow_stats)
+        # TODO: send to kafka ?
+        # FIXME: etcd problem, do not update objects for now
+        # # UNTESTED : the openolt driver does not yet provide flow stats
+        # self.device.adapter_agent.update_flow_stats(
+        #       self.device.logical_device_id,
+        #       flow_id=flow_stats.flow_id, packet_count=flow_stats.tx_packets,
+        #       byte_count=flow_stats.tx_bytes)
+
+    def ports_statistics_kpis(self, port_stats):
+        """
+        map the port stats values into a dictionary
+        Create a kpoEvent and publish to Kafka
+
+        :param port_stats:
+        :return:
+        """
+
+        try:
+            intf_id = port_stats.intf_id
+
+            if self.platform.intf_id_to_port_no(0, Port.ETHERNET_NNI) < intf_id < \
+                    self.platform.intf_id_to_port_no(4, Port.ETHERNET_NNI):
+                """
+                for this release we are only interested in the first NNI for
+                Northbound.
+                we are not using the other 3
+                """
+                return
+            else:
+
+                pm_data = {}
+                pm_data["rx_bytes"] = port_stats.rx_bytes
+                pm_data["rx_packets"] = port_stats.rx_packets
+                pm_data["rx_ucast_packets"] = port_stats.rx_ucast_packets
+                pm_data["rx_mcast_packets"] = port_stats.rx_mcast_packets
+                pm_data["rx_bcast_packets"] = port_stats.rx_bcast_packets
+                pm_data["rx_error_packets"] = port_stats.rx_error_packets
+                pm_data["tx_bytes"] = port_stats.tx_bytes
+                pm_data["tx_packets"] = port_stats.tx_packets
+                pm_data["tx_ucast_packets"] = port_stats.tx_ucast_packets
+                pm_data["tx_mcast_packets"] = port_stats.tx_mcast_packets
+                pm_data["tx_bcast_packets"] = port_stats.tx_bcast_packets
+                pm_data["tx_error_packets"] = port_stats.tx_error_packets
+                pm_data["rx_crc_errors"] = port_stats.rx_crc_errors
+                pm_data["bip_errors"] = port_stats.bip_errors
+
+                pm_data["intf_id"] = intf_id
+
+                """
+                   Based upon the intf_id map to an nni port or a pon port
+                    the intf_id is the key to the north or south bound collections
+                    
+                    Based upon the intf_id the port object (nni_port or pon_port) will
+                    have its data attr. updated by the current dataset collected.
+                    
+                    For prefixing the rule is currently to use the port number and not the intf_id
+                    
+                """
+                #FIXME : Just use first NNI for now
+                if intf_id == self.platform.intf_id_to_port_no(0,
+                                                          Port.ETHERNET_NNI):
+                    #NNI port (just the first one)
+                    self.update_port_object_kpi_data(
+                        port_object=self.northbound_ports[port_stats.intf_id], datadict=pm_data)
+                else:
+                    #PON ports
+                    self.update_port_object_kpi_data(
+                        port_object=self.southbound_ports[port_stats.intf_id],datadict=pm_data)
+        except Exception as err:
+            self.log.exception("Error publishing kpi statistics. ", errmessage=err)
+
+    def update_logical_port_stats(self, port_stats):
+        try:
+            label = 'nni-{}'.format(port_stats.intf_id)
+            logical_port = self.device.adapter_agent.get_logical_port(
+                self.device.logical_device_id, label)
+        except KeyError as e:
+            self.log.warn('logical port was not found, it may not have been '
+                          'created yet', exception=e)
+            return
+
+        if logical_port is None:
+            self.log.error('logical-port-is-None',
+                logical_device_id=self.device.logical_device_id, label=label,
+                port_stats=port_stats)
+            return
+
+        logical_port.ofp_port_stats.rx_packets = port_stats.rx_packets
+        logical_port.ofp_port_stats.rx_bytes = port_stats.rx_bytes
+        logical_port.ofp_port_stats.tx_packets = port_stats.tx_packets
+        logical_port.ofp_port_stats.tx_bytes = port_stats.tx_bytes
+        logical_port.ofp_port_stats.rx_errors = port_stats.rx_error_packets
+        logical_port.ofp_port_stats.tx_errors = port_stats.tx_error_packets
+        logical_port.ofp_port_stats.rx_crc_err = port_stats.rx_crc_errors
+
+        self.log.debug('after-stats-update', port=logical_port)
+
+        self.device.adapter_agent.update_logical_port(
+            self.device.logical_device_id, logical_port)
+
+    """
+    The following 4 methods customer naming, the generation of the port objects, building of those
+    objects and populating new data.   The pm metrics operate on the value that are contained in the Port objects.
+    This class updates those port objects with the current data from the grpc indication and 
+    post the data on a fixed interval.
+    
+    """
+    def get_openolt_port_pm_names(self):
+        """
+        This collects a dictionary of the custom port names
+        used by the openolt.
+
+        Some of these are the same as the pm names used by the olt_pm_metrics
+        class; if the set is the same there is no need to call this method.
+        However, when custom names are used in the protos, the specific names
+        should be pushed into the olt_pm_metrics class.
+
+        :return:
+        """
+        nni_pm_names = {
+            ('intf_id', PmConfig.CONTEXT),  # Physical device interface ID/Port number
+
+            ('admin_state', PmConfig.STATE),
+            ('oper_status', PmConfig.STATE),
+            ('port_no', PmConfig.GAUGE),
+            ('rx_bytes', PmConfig.COUNTER),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_ucast_packets', PmConfig.COUNTER),
+            ('rx_mcast_packets', PmConfig.COUNTER),
+            ('rx_bcast_packets', PmConfig.COUNTER),
+            ('rx_error_packets', PmConfig.COUNTER),
+            ('tx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_ucast_packets', PmConfig.COUNTER),
+            ('tx_mcast_packets', PmConfig.COUNTER),
+            ('tx_bcast_packets', PmConfig.COUNTER),
+            ('tx_error_packets', PmConfig.COUNTER)
+        }
+        nni_pm_names_from_kpi_extension = {
+            ('intf_id', PmConfig.CONTEXT),  # Physical device interface ID/Port number
+
+            ('admin_state', PmConfig.STATE),
+            ('oper_status', PmConfig.STATE),
+
+            ('rx_bytes', PmConfig.COUNTER),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_ucast_packets', PmConfig.COUNTER),
+            ('rx_mcast_packets', PmConfig.COUNTER),
+            ('rx_bcast_packets', PmConfig.COUNTER),
+            ('rx_error_packets', PmConfig.COUNTER),
+
+            ('tx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_ucast_packets', PmConfig.COUNTER),
+            ('tx_mcast_packets', PmConfig.COUNTER),
+            ('tx_bcast_packets', PmConfig.COUNTER),
+            ('tx_error_packets', PmConfig.COUNTER),
+            ('rx_crc_errors', PmConfig.COUNTER),
+            ('bip_errors', PmConfig.COUNTER),
+        }
+
+        # pon_names uses same structure as nni_names with the addition of pon_id to context
+        pon_pm_names = {
+            ('pon_id', PmConfig.CONTEXT),  # PON ID (0..n)
+            ('port_no', PmConfig.CONTEXT),
+
+            ('admin_state', PmConfig.STATE),
+            ('oper_status', PmConfig.STATE),
+            ('rx_bytes', PmConfig.COUNTER),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_ucast_packets', PmConfig.COUNTER),
+            ('rx_mcast_packets', PmConfig.COUNTER),
+            ('rx_bcast_packets', PmConfig.COUNTER),
+            ('rx_error_packets', PmConfig.COUNTER),
+            ('tx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_ucast_packets', PmConfig.COUNTER),
+            ('tx_mcast_packets', PmConfig.COUNTER),
+            ('tx_bcast_packets', PmConfig.COUNTER),
+            ('tx_error_packets', PmConfig.COUNTER)
+        }
+        pon_pm_names_from_kpi_extension = {
+            ('intf_id', PmConfig.CONTEXT),        # Physical device port number (PON)
+            ('pon_id', PmConfig.CONTEXT),         # PON ID (0..n)
+
+            ('admin_state', PmConfig.STATE),
+            ('oper_status', PmConfig.STATE),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_bytes', PmConfig.COUNTER),
+            ('tx_bip_errors', PmConfig.COUNTER),
+            ('in_service_onus', PmConfig.GAUGE),
+            ('closest_onu_distance', PmConfig.GAUGE)
+        }
+        onu_pm_names = {
+            ('intf_id', PmConfig.CONTEXT),        # Physical device port number (PON)
+            ('pon_id', PmConfig.CONTEXT),
+            ('onu_id', PmConfig.CONTEXT),
+
+            ('fiber_length', PmConfig.GAUGE),
+            ('equalization_delay', PmConfig.GAUGE),
+            ('rssi', PmConfig.GAUGE),
+        }
+        gem_pm_names = {
+            ('intf_id', PmConfig.CONTEXT),        # Physical device port number (PON)
+            ('pon_id', PmConfig.CONTEXT),
+            ('onu_id', PmConfig.CONTEXT),
+            ('gem_id', PmConfig.CONTEXT),
+
+            ('alloc_id', PmConfig.GAUGE),
+            ('rx_packets', PmConfig.COUNTER),
+            ('rx_bytes', PmConfig.COUNTER),
+            ('tx_packets', PmConfig.COUNTER),
+            ('tx_bytes', PmConfig.COUNTER),
+        }
+        # Build a dict for the names.  The caller will index to the correct values
+        names_dict = {"nni_pm_names": nni_pm_names,
+                      "pon_pm_names": pon_pm_names,
+                      "pon_pm_names_orig": pon_pm_names_from_kpi_extension,
+                      "onu_pm_names": onu_pm_names,
+                      "gem_pm_names": gem_pm_names,
+                      }
+
+        return names_dict
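+        # Usage note (illustrative): init_pm_metrics() above indexes this
+        # dict as get_openolt_port_pm_names()['nni_pm_names'] and
+        # ['pon_pm_names'] to build the PmConfig entries pushed into
+        # OltPmMetrics.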
+
+    def init_ports(self, device_id=12345, type="nni", log=None):
+        """
+        This method collects the port objects:  nni and pon that are updated with the
+        current data from the OLT
+
+        Both the northbound (nni) and southbound ports are indexed by the interface id (intf_id)
+        and NOT the port number. When the port object is instantiated it will contain the intf_id and
+        port_no values
+
+        :param type:
+        :param device_id:
+        :param log:
+        :return:
+        """
+        try:
+            if type == "nni":
+                nni_ports = {}
+                for i in range(0, 1):
+                    nni_port = self.build_port_object(i, type='nni')
+                    nni_ports[nni_port.intf_id] = nni_port
+                return nni_ports
+            elif type == "pon":
+                pon_ports = {}
+                for i in range(0, 16):
+                    pon_port = self.build_port_object(i, type="pon")
+                    pon_ports[pon_port.intf_id] = pon_port
+                return pon_ports
+            else:
+                self.log.exception("Unmapped port type requested = " , type=type)
+                raise Exception("Unmapped port type requested = " + type)
+
+        except Exception as err:
+            raise Exception(err)
+
+    def build_port_object(self, port_num, type="nni"):
+        """
+        Separate method to allow updating of north- and south-bound ports
+        for newly discovered ports and devices
+
+        :param port_num:
+        :param type:
+        :return:
+        """
+        try:
+            """ 
+             This builds a port object which is added to the 
+             appropriate northbound or southbound values
+            """
+            if type == "nni":
+                kwargs = {
+                    'port_no': port_num,
+                    'intf_id': self.platform.intf_id_to_port_no(port_num,
+                                                           Port.ETHERNET_NNI),
+                    "device_id": self.device.device_id
+                }
+                port = NniPort(**kwargs)
+                return port
+            elif type == "pon":
+                # PON ports require a different configuration
+                #  intf_id and pon_id are currently equal.
+                kwargs = {
+                    'port_no': port_num,
+                    'intf_id':  self.platform.intf_id_to_port_no(port_num,
+                                                           Port.PON_OLT),
+                    'pon-id':  self.platform.intf_id_to_port_no(port_num,
+                                                           Port.PON_OLT),
+                    "device_id": self.device.device_id
+                }
+                port = PonPort(**kwargs)
+                return port
+
+            else:
+                self.log.exception("Unknown port type")
+                raise Exception("Unknown port type")
+
+        except Exception as err:
+            self.log.exception("Unknown port type", error=err)
+            raise Exception(err)
+
+    def update_port_object_kpi_data(self, port_object, datadict=None):
+        """
+        This method takes the formatted data that is marshalled from
+        the indication collector and updates the corresponding property
+        by attr get and set.
+
+        :param port_object: The port object to be updated
+        :param datadict:
+        :return:
+        """
+        if datadict is None:
+            datadict = {}
+
+        try:
+            cur_attr = ""
+            if isinstance(port_object, NniPort):
+                for k, v in datadict.items():
+                    cur_attr = k
+                    if hasattr(port_object, k):
+                        setattr(port_object, k, v)
+            elif isinstance(port_object, PonPort):
+                for k, v in datadict.items():
+                    cur_attr = k
+                    if hasattr(port_object, k):
+                        setattr(port_object, k, v)
+            else:
+                raise Exception("Must be either PON or NNI port.")
+            return
+        except Exception as err:
+            self.log.exception("Caught error updating port data: ", cur_attr=cur_attr, errormsg=err.message)
+            raise Exception(err)
+
+
+class PonPort(object):
+    """
+    This is a highly reduced version taken from the adtran pon_port.
+    TODO: Extend for use in the openolt adapter set.
+    """
+    MAX_ONUS_SUPPORTED = 256
+    DEFAULT_ENABLED = False
+    MAX_DEPLOYMENT_RANGE = 25000  # Meters (OLT-PB maximum)
+
+    _MCAST_ONU_ID = 253
+    _MCAST_ALLOC_BASE = 0x500
+
+    _SUPPORTED_ACTIVATION_METHODS = ['autodiscovery']  # , 'autoactivate']
+    _SUPPORTED_AUTHENTICATION_METHODS = ['serial-number']
+
+    def __init__(self, **kwargs):
+        assert 'pon-id' in kwargs, 'PON ID not found'
+
+        self._pon_id = kwargs['pon-id']
+        self._device_id = kwargs['device_id']
+        self._intf_id = kwargs['intf_id']
+        self._port_no = kwargs['port_no']
+        self._port_id = 0
+        # self._name = 'xpon 0/{}'.format(self._pon_id+1)
+        self._label = 'pon-{}'.format(self._pon_id)
+
+        self._onus = {}  # serial_number-base64 -> ONU  (allowed list)
+        self._onu_by_id = {}  # onu-id -> ONU
+
+        """
+        Statistics  taken from nni_port
+        self.intf_id = 0  #handled by getter
+        self.port_no = 0  #handled by getter
+        self.port_id = 0  #handled by getter  
+
+        Note:  In the current implementation of the kpis coming from the BAL
+        the stats are the same model for NNI and PON.
+
+        TODO:  Integrate additional kpis for the PON and other southbound
+        port objects.
+
+        """
+
+        self.rx_bytes = 0
+        self.rx_packets = 0
+        self.rx_mcast_packets = 0
+        self.rx_bcast_packets = 0
+        self.rx_error_packets = 0
+        self.tx_bytes = 0
+        self.tx_packets = 0
+        self.tx_ucast_packets = 0
+        self.tx_mcast_packets = 0
+        self.tx_bcast_packets = 0
+        self.tx_error_packets = 0
+        return
+
+    def __str__(self):
+        return "PonPort-{}: Admin: {}, Oper: {}, OLT: {}".format(self._label,
+                                                                 self._admin_state,
+                                                                 self._oper_status,
+                                                                 self.olt)
+
+    @property
+    def intf_id(self):
+        return self._intf_id
+
+    @intf_id.setter
+    def intf_id(self, value):
+        self._intf_id = value
+
+    @property
+    def pon_id(self):
+        return self._pon_id
+
+    @pon_id.setter
+    def pon_id(self, value):
+        self._pon_id = value
+
+    @property
+    def port_no(self):
+        return self._port_no
+
+    @port_no.setter
+    def port_no(self, value):
+        self._port_no = value
+
+    @property
+    def port_id(self):
+        return self._port_id
+
+    @port_id.setter
+    def port_id(self, value):
+        self._port_id = value
+
+    @property
+    def onus(self):
+        """
+        Get a set of all ONUs.  While the set is immutable, do not use this
+        method to get a collection that you will iterate through and that may
+        yield the CPU, such as an inline callback.  ONUs may be deleted at any
+        time and they will set some references to other objects to NULL during
+        the 'delete' call.  Instead, get a list of ONU-IDs, iterate on these
+        and call the 'onu' method below (which will return 'None' if the ONU
+        has been deleted).
+
+        :return: (frozenset) collection of ONU objects on this PON
+        """
+        return frozenset(self._onus.values())
+
+    @property
+    def onu_ids(self):
+        return frozenset(self._onu_by_id.keys())
+
+    def onu(self, onu_id):
+        return self._onu_by_id.get(onu_id)
+
+
+class NniPort(object):
+    """
+    Northbound network port, often Ethernet-based
+
+    This is a highly reduced version taken from the adtran nni_port code set
+    TODO:   add functions to allow for port specific values and operations
+
+    """
+    def __init__(self, **kwargs):
+        # TODO: Extend for use in the openolt adapter set.
+        self.port_no = kwargs.get('port_no')
+        self._port_no = self.port_no
+        self._name = kwargs.get('name', 'nni-{}'.format(self._port_no))
+        self._logical_port = None
+
+        # Statistics
+        self.intf_id = kwargs.pop('intf_id', None)
+        self.port_no = 0
+        self.rx_bytes = 0
+        self.rx_packets = 0
+        self.rx_mcast_packets = 0
+        self.rx_bcast_packets = 0
+        self.rx_error_packets = 0
+        self.tx_bytes = 0
+        self.tx_packets = 0
+        self.tx_ucast_packets = 0
+        self.tx_mcast_packets = 0
+        self.tx_bcast_packets = 0
+        self.tx_error_packets = 0
+        return
+
+    def __str__(self):
+        return "NniPort-{}: Admin: {}, Oper: {}, parent: {}".format(self._port_no,
+                                                                    self._admin_state,
+                                                                    self._oper_status,
+                                                                    self._parent)
\ No newline at end of file
diff --git a/voltha/adapters/bbsimolt/protos/Makefile b/voltha/adapters/bbsimolt/protos/Makefile
new file mode 100644
index 0000000..62eacc8
--- /dev/null
+++ b/voltha/adapters/bbsimolt/protos/Makefile
@@ -0,0 +1,85 @@
+#
+# Copyright 2017 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Makefile to build all protobuf and gRPC related artifacts
+
+ifeq ($(VOLTHA_BASE)_set,_set)
+  $(error To get started, please source the env.sh file from Voltha top level directory)
+endif
+
+default: build
+
+PROTO_FILES := $(wildcard *.proto) $(wildcard $(VOLTHA_BASE)/voltha/protos/third_party/google/api/*proto)
+PROTO_PB2_FILES := $(foreach f,$(PROTO_FILES),$(subst .proto,_pb2.py,$(f)))
+PROTO_DESC_FILES := $(foreach f,$(PROTO_FILES),$(subst .proto,.desc,$(f)))
+
+PROTOC_PREFIX := /usr/local
+PROTOC_LIBDIR := $(PROTOC_PREFIX)/lib
+
+PROTOC := $(PROTOC_PREFIX)/bin/protoc
+
+PROTOC_VERSION := "3.3.0"
+PROTOC_DOWNLOAD_PREFIX := "https://github.com/google/protobuf/releases/download"
+PROTOC_DIR := protobuf-$(PROTOC_VERSION)
+PROTOC_TARBALL := protobuf-python-$(PROTOC_VERSION).tar.gz
+PROTOC_DOWNLOAD_URI := $(PROTOC_DOWNLOAD_PREFIX)/v$(PROTOC_VERSION)/$(PROTOC_TARBALL)
+PROTOC_BUILD_TMP_DIR := "/tmp/protobuf-build-$(shell uname -s | tr '[:upper:]' '[:lower:]')"
+
+build: $(PROTOC) protos
+
+protos: $(PROTO_PB2_FILES)
+
+%_pb2.py: %.proto Makefile
+	@echo "Building protocol buffer artifacts from $<"
+	env LD_LIBRARY_PATH=$(PROTOC_LIBDIR) python -m grpc.tools.protoc \
+	    -I. \
+	    -I$(VOLTHA_BASE)/voltha/protos/third_party \
+	    --python_out=. \
+	    --grpc_python_out=. \
+	    --descriptor_set_out=$(basename $<).desc \
+	    --include_imports \
+	    --include_source_info \
+	    $<
+
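+# Usage note (illustrative; assumes env.sh has been sourced from the Voltha top
+# level directory): running `make protos` in this directory should regenerate
+# the Python modules (e.g. openolt_pb2.py) and the .desc descriptor files from
+# openolt.proto via the grpc.tools.protoc invocation above.
+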
+clean:
+	rm -f $(PROTO_PB2_FILES) $(PROTO_DESC_FILES)
+
+$(PROTOC):
+	@echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+	@echo "It looks like you don't have protocol buffer tools installed."
+	@echo "To install the protocol buffer toolchain, you can run:"
+	@echo "    make install-protoc"
+	@echo "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
+
+install-protoc: $(PROTOC)
+	@echo "Downloading and installing protocol buffer support."
+	@echo "Installation will require sudo privileges"
+	@echo "This will take a few minutes."
+	mkdir -p $(PROTOC_BUILD_TMP_DIR)
+	@echo "We ask for sudo credentials now so we can install at the end"; \
+	sudo echo "Thanks"; \
+	    cd $(PROTOC_BUILD_TMP_DIR); \
+	    wget $(PROTOC_DOWNLOAD_URI); \
+	    tar xzvf $(PROTOC_TARBALL); \
+	    cd $(PROTOC_DIR); \
+	    ./configure --prefix=$(PROTOC_PREFIX); \
+	    make; \
+	    sudo make install
+
+uninstall-protoc:
+	cd $(PROTOC_BUILD_TMP_DIR)/$(PROTOC_DIR); \
+	    sudo make uninstall
+
diff --git a/voltha/adapters/bbsimolt/protos/__init__.py b/voltha/adapters/bbsimolt/protos/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/voltha/adapters/bbsimolt/protos/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/voltha/adapters/bbsimolt/protos/openolt.proto b/voltha/adapters/bbsimolt/protos/openolt.proto
new file mode 100644
index 0000000..d357334
--- /dev/null
+++ b/voltha/adapters/bbsimolt/protos/openolt.proto
@@ -0,0 +1,409 @@
+// Copyright (c) 2018 Open Networking Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at:
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+package openolt;
+import "google/api/annotations.proto";
+
+service Openolt {
+
+    rpc DisableOlt(Empty) returns (Empty) {
+        option (google.api.http) = {
+          post: "/v1/Disable"
+          body: "*"
+        };
+    }
+
+    rpc ReenableOlt(Empty) returns (Empty) {
+        option (google.api.http) = {
+          post: "/v1/Reenable"
+          body: "*"
+        };
+    }
+
+    rpc ActivateOnu(Onu) returns (Empty) {
+        option (google.api.http) = {
+          post: "/v1/EnableOnu"
+          body: "*"
+        };
+    }
+
+    rpc DeactivateOnu(Onu) returns (Empty) {
+        option (google.api.http) = {
+          post: "/v1/DisableOnu"
+          body: "*"
+        };
+    }
+
+    rpc DeleteOnu(Onu) returns (Empty) {
+        option (google.api.http) = {
+          post: "/v1/DeleteOnu"
+          body: "*"
+        };
+    }
+
+    rpc OmciMsgOut(OmciMsg) returns (Empty) {
+        option (google.api.http) = {
+          post: "/v1/OmciMsgOut"
+          body: "*"
+        };
+    }
+
+    rpc OnuPacketOut(OnuPacket) returns (Empty) {
+        option (google.api.http) = {
+          post: "/v1/OnuPacketOut"
+          body: "*"
+        };
+    }
+
+    rpc UplinkPacketOut(UplinkPacket) returns (Empty) {
+        option (google.api.http) = {
+          post: "/v1/UplinkPacketOut"
+          body: "*"
+        };
+    }
+
+    rpc FlowAdd(Flow) returns (Empty) {
+        option (google.api.http) = {
+          post: "/v1/FlowAdd"
+          body: "*"
+        };
+    }
+
+    rpc FlowRemove(Flow) returns (Empty) {
+        option (google.api.http) = {
+          post: "/v1/FlowRemove"
+          body: "*"
+        };
+    }
+
+    rpc HeartbeatCheck(Empty) returns (Heartbeat) {
+        option (google.api.http) = {
+          post: "/v1/HeartbeatCheck"
+          body: "*"
+        };
+    }
+
+    rpc EnablePonIf(Interface) returns (Empty) {
+        option (google.api.http) = {
+            post: "/v1/EnablePonIf"
+            body: "*"
+        };
+    }
+
+    rpc DisablePonIf(Interface) returns (Empty) {
+        option (google.api.http) = {
+            post: "/v1/DisablePonIf"
+            body: "*"
+        };
+    }
+
+    rpc GetDeviceInfo(Empty) returns (DeviceInfo) {
+        option (google.api.http) = {
+            post: "/v1/GetDeviceInfo"
+            body: "*"
+        };
+    }
+
+    rpc Reboot(Empty) returns (Empty) {
+         option (google.api.http) = {
+            post: "/v1/Reboot"
+            body: "*"
+        };
+    }
+
+    rpc CollectStatistics(Empty) returns (Empty) {
+        option (google.api.http) = {
+            post: "/v1/CollectStatistics"
+            body: "*"
+        };
+    }
+
+    rpc EnableIndication(Empty) returns (stream Indication) {}
+}
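+
+// Illustrative client sketch (an assumption, not part of this proto): once
+// Python stubs are generated from this file, an adapter could consume the
+// EnableIndication stream roughly as follows (the channel address, import
+// path and printed fields are assumptions):
+//
+//     import grpc
+//     from voltha.adapters.bbsimolt.protos import openolt_pb2, openolt_pb2_grpc
+//
+//     channel = grpc.insecure_channel('bbsim:50060')
+//     stub = openolt_pb2_grpc.OpenoltStub(channel)
+//     for ind in stub.EnableIndication(openolt_pb2.Empty()):
+//         print(ind.WhichOneof('data'))  # e.g. 'olt_ind', 'onu_disc_ind'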
+
+message Indication {
+    oneof data {
+        OltIndication olt_ind = 1;
+        IntfIndication intf_ind = 2;
+        IntfOperIndication intf_oper_ind = 3;
+        OnuDiscIndication onu_disc_ind = 4;
+        OnuIndication onu_ind = 5;
+        OmciIndication omci_ind = 6;
+        PacketIndication pkt_ind = 7;
+        PortStatistics port_stats = 8;
+        FlowStatistics flow_stats = 9;
+        AlarmIndication alarm_ind = 10;
+    }
+}
+
+message AlarmIndication {
+    oneof data {
+        LosIndication los_ind = 1;
+        DyingGaspIndication dying_gasp_ind = 2;
+        OnuAlarmIndication onu_alarm_ind = 3;
+        OnuStartupFailureIndication onu_startup_fail_ind = 4;
+        OnuSignalDegradeIndication onu_signal_degrade_ind = 5;
+        OnuDriftOfWindowIndication onu_drift_of_window_ind = 6;
+        OnuLossOfOmciChannelIndication onu_loss_omci_ind = 7;
+        OnuSignalsFailureIndication onu_signals_fail_ind = 8;
+        OnuTransmissionInterferenceWarning onu_tiwi_ind = 9;
+        OnuActivationFailureIndication onu_activation_fail_ind = 10;
+        OnuProcessingErrorIndication onu_processing_error_ind = 11;
+    }
+}
+
+message OltIndication {
+    string oper_state = 1;	// up, down
+}
+
+message IntfIndication {
+    fixed32 intf_id = 1;
+    string oper_state = 2;      // up, down
+}
+
+message OnuDiscIndication {
+    fixed32 intf_id = 1;
+    SerialNumber serial_number = 2;
+}
+
+message OnuIndication {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    string oper_state = 3;      // up, down
+    string admin_state = 5;     // up, down
+    SerialNumber serial_number = 4;
+}
+
+message IntfOperIndication {
+    string type = 1;		// nni, pon
+    fixed32 intf_id = 2;
+    string oper_state = 3;      // up, down
+}
+
+message OmciIndication {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    bytes pkt = 3;
+}
+
+message PacketIndication {
+    string intf_type = 5;		// nni, pon, unknown
+    fixed32 intf_id = 1;
+    fixed32 gemport_id = 2;
+    fixed32 flow_id = 3;
+    bytes pkt = 4;
+}
+
+message Interface {
+    fixed32 intf_id = 1;
+}
+
+message Heartbeat {
+    fixed32 heartbeat_signature = 1;
+}
+
+message Onu {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    SerialNumber serial_number = 3;
+    fixed32 pir = 4;   // peak information rate assigned to onu
+    fixed32 alloc_id = 5;
+}
+
+message OmciMsg {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    bytes pkt = 3;
+}
+
+message OnuPacket {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    bytes pkt = 3;
+}
+
+message UplinkPacket {
+    fixed32 intf_id = 1;
+    bytes pkt = 2;
+}
+
+message DeviceInfo {
+    string vendor = 1;
+    string model = 2;
+    string hardware_version = 3;
+    string firmware_version = 4;
+    string technology = 5;
+    fixed32 onu_id_start = 6;
+    fixed32 onu_id_end = 7;
+    fixed32 alloc_id_start = 8;
+    fixed32 alloc_id_end = 9;
+    fixed32 gemport_id_start = 10;
+    fixed32 gemport_id_end = 11;
+    fixed32 pon_ports = 12;
+}
+
+message Classifier {
+    fixed32 o_tpid = 1;
+    fixed32 o_vid = 2;
+    fixed32 i_tpid = 3;
+    fixed32 i_vid = 4;
+    fixed32 o_pbits = 5;
+    fixed32 i_pbits = 6;
+    fixed32 eth_type = 7;
+    bytes dst_mac = 8;
+    bytes src_mac = 9;
+    fixed32 ip_proto = 10;
+    fixed32 dst_ip = 11;
+    fixed32 src_ip = 12;
+    fixed32 src_port = 13;
+    fixed32 dst_port = 14;
+    string pkt_tag_type = 15;	// untagged, single_tag, double_tag
+}
+
+message ActionCmd {
+    bool add_outer_tag = 1;
+    bool remove_outer_tag = 2;
+    bool trap_to_host = 3;
+}
+
+message Action {
+    ActionCmd cmd = 1;
+    fixed32 o_vid = 2;
+    fixed32 o_pbits = 3;
+    fixed32 o_tpid = 4;
+    fixed32 i_vid = 5;
+    fixed32 i_pbits = 6;
+    fixed32 i_tpid = 7;
+}
+
+message Flow {
+    sfixed32 access_intf_id = 1;
+    sfixed32 onu_id = 2;
+    fixed32 flow_id = 3;
+    string flow_type = 4;	// upstream, downstream, broadcast, multicast
+    sfixed32 alloc_id = 10;
+    sfixed32 network_intf_id = 5;
+    sfixed32 gemport_id = 6;
+    Classifier classifier = 7;
+    Action action = 8;
+    sfixed32 priority = 9;
+}
+
+message SerialNumber {
+    bytes vendor_id = 1;
+    bytes vendor_specific = 2;
+}
+
+message PortStatistics {
+    fixed32 intf_id = 1;
+    fixed64 rx_bytes = 2;
+    fixed64 rx_packets = 3;
+    fixed64 rx_ucast_packets = 4;
+    fixed64 rx_mcast_packets = 5;
+    fixed64 rx_bcast_packets = 6;
+    fixed64 rx_error_packets = 7;
+    fixed64 tx_bytes = 8;
+    fixed64 tx_packets = 9;
+    fixed64 tx_ucast_packets = 10;
+    fixed64 tx_mcast_packets = 11;
+    fixed64 tx_bcast_packets = 12;
+    fixed64 tx_error_packets = 13;
+    fixed64 rx_crc_errors = 14;
+    fixed64 bip_errors = 15;
+    fixed32 timestamp = 16;
+}
+
+message FlowStatistics {
+    fixed32 flow_id = 1;
+    fixed64 rx_bytes = 2;
+    fixed64 rx_packets = 3;
+    fixed64 tx_bytes = 8;
+    fixed64 tx_packets = 9;
+    fixed32 timestamp = 16;
+}
+
+message LosIndication {
+    fixed32 intf_id = 1;
+    string status = 2;
+}
+
+message DyingGaspIndication {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    string status = 3;
+}
+
+message OnuAlarmIndication {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    string los_status = 3;
+    string lob_status = 4;
+    string lopc_miss_status = 5;
+    string lopc_mic_error_status = 6;
+}
+
+message OnuStartupFailureIndication {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    string status = 3;
+}
+
+message OnuSignalDegradeIndication {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    string status = 3;
+    fixed32 inverse_bit_error_rate = 4;
+}
+
+message OnuDriftOfWindowIndication {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    string status = 3;
+    fixed32 drift = 4;
+    fixed32 new_eqd = 5;
+}
+
+message OnuLossOfOmciChannelIndication {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    string status = 3;
+}
+
+message OnuSignalsFailureIndication {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    string status = 3;
+    fixed32 inverse_bit_error_rate = 4;
+}
+
+message OnuTransmissionInterferenceWarning {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+    string status = 3;
+    fixed32 drift = 4;
+}
+
+message OnuActivationFailureIndication {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+}
+
+message OnuProcessingErrorIndication {
+    fixed32 intf_id = 1;
+    fixed32 onu_id = 2;
+}
+
+
+message Empty {}
diff --git a/voltha/adapters/openolt/openolt_device.py b/voltha/adapters/openolt/openolt_device.py
index 14d48a8..fc4696a 100644
--- a/voltha/adapters/openolt/openolt_device.py
+++ b/voltha/adapters/openolt/openolt_device.py
@@ -85,6 +85,7 @@
         self.adapter_agent = kwargs['adapter_agent']
         self.device_num = kwargs['device_num']
         device = kwargs['device']
+        dpid = kwargs.get('dp_id')
 
         self.platform_class = kwargs['support_classes']['platform']
         self.resource_mgr_class = kwargs['support_classes']['resource_mgr']
@@ -113,8 +114,8 @@
 
         # If logical device does not exist create it
         if not device.parent_id:
-
-            dpid = '00:00:' + self.ip_hex(self.host_and_port.split(":")[0])
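+            # Prefer a dp_id supplied by the adapter (BBSimOLT passes one in);
+            # otherwise derive the datapath id from the device management IP,
+            # as before.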
+            if dpid is None:
+                dpid = '00:00:' + self.ip_hex(self.host_and_port.split(":")[0])
 
             # Create logical OF device
             ld = LogicalDevice(
@@ -500,10 +501,9 @@
 
         if self.platform.intf_id_from_pon_port_no(onu_device.parent_port_no) \
                 != onu_indication.intf_id:
-            previous_intf_id = self.platform.intf_id_from_pon_port_no(
-                onu_device.parent_port_no),
             self.log.warn('ONU-is-on-a-different-intf-id-now',
-                          previous_intf_id=previous_intf_id,
+                          previous_intf_id=self.platform.intf_id_from_pon_port_no(
+                              onu_device.parent_port_no),
                           current_intf_id=onu_indication.intf_id)
             # FIXME - handle intf_id mismatch (ONU move?)
 
@@ -515,7 +515,7 @@
                           received_onu_id=onu_indication.onu_id)
 
         uni_no = self.platform.mk_uni_port_num(onu_indication.intf_id,
-                                               onu_indication.onu_id)
+                                          onu_indication.onu_id)
         uni_name = self.port_name(uni_no, Port.ETHERNET_UNI,
                                   serial_number=onu_device.serial_number)
 
@@ -667,8 +667,7 @@
                        flow_id=pkt_indication.flow_id)
 
         if pkt_indication.intf_type == "pon":
-            pon_intf_gemport = (pkt_indication.intf_id,
-                                pkt_indication.gemport_id)
+            pon_intf_gemport = (pkt_indication.intf_id, pkt_indication.gemport_id)
             try:
                 onu_id = int(self.resource_mgr.kv_store[pon_intf_gemport])
                 if onu_id is None:
@@ -678,8 +677,8 @@
                                gemport_id=pkt_indication.gemport_id, e=e)
                 return
 
-            logical_port_num = self.platform.mk_uni_port_num(
-                pkt_indication.intf_id, onu_id)
+            logical_port_num = self.platform.mk_uni_port_num(pkt_indication.intf_id,
+                                                        onu_id)
         elif pkt_indication.intf_type == "nni":
             logical_port_num = self.platform.intf_id_to_port_no(
                 pkt_indication.intf_id,