VOL-1489: Can now receive flows incrementally

Core can now deliver incremental flow updates
to the handler and flow manager, and successfully calls the
openolt/BAL flow add for EAP, DHCP, and LLDP. BAL still complains
about the downstream EAP flow, likely due to ID problems.

Calculation of uni_id, onu_id needs additional
info from the decomposer.

Change-Id: Ic347e71501e6cf2bb0e93d42eaf0b6c709238fbd
diff --git a/python/adapters/openolt/openolt.py b/python/adapters/openolt/openolt.py
index 9c5f799..7e1a3e6 100644
--- a/python/adapters/openolt/openolt.py
+++ b/python/adapters/openolt/openolt.py
@@ -57,7 +57,7 @@
         DeviceType(
             id=name,
             adapter=name,
-            accepts_bulk_flow_update=True,
+            accepts_bulk_flow_update=False,
             accepts_add_remove_flow_updates=True
         )
     ]
@@ -227,14 +227,17 @@
         log.debug('flows and grousp details', flows=flows, groups=groups)
         assert len(groups.items) == 0, "Cannot yet deal with groups"
         handler = self.devices[device.id]
-        return handler.update_flow_table(flows.items)
+        handler.update_flow_table(flows.items)
+
+        return device
 
     def update_flows_incrementally(self, device, flow_changes, group_changes):
         log.debug('update_flows_incrementally', device=device,
                   flow_changes=flow_changes, group_changes=group_changes)
-        log.info('This device does not allow this, therefore it is Not '
-                 'implemented')
-        raise NotImplementedError()
+        handler = self.devices[device.id]
+        handler.update_flow_table(flow_changes)
+
+        return device
 
     def update_logical_flows(self, device_id, flows_to_add, flows_to_remove,
                              groups, device_rules_map):
diff --git a/python/adapters/openolt/openolt_device.py b/python/adapters/openolt/openolt_device.py
index beea0c7..17cc6ad 100644
--- a/python/adapters/openolt/openolt_device.py
+++ b/python/adapters/openolt/openolt_device.py
@@ -290,7 +290,7 @@
                                                     self.extra_args,
                                                     self.device_info)
         self.platform = self.platform_class(self.log, self.resource_mgr)
-        self.flow_mgr = self.flow_mgr_class(self.core_proxy, self.log,
+        self.flow_mgr = self.flow_mgr_class(self.core_proxy, self.adapter_proxy, self.log,
                                             self.stub, self.device_id,
                                             self.logical_device_id,
                                             self.platform, self.resource_mgr)
@@ -967,23 +967,19 @@
                 yield self.adapter_agent.delete_port(self.device_id, port)
                 return
 
-    def update_flow_table(self, flows):
-        self.log.debug('No updates here now, all is done in logical flows '
-                       'update')
+    def update_flow_table(self, flow_changes):
 
-    def update_logical_flows(self, flows_to_add, flows_to_remove,
-                             device_rules_map):
+        self.log.debug("update_flow_table", flow_changes=flow_changes)
+
+        flows_to_add = flow_changes.to_add.items
+        flows_to_remove = flow_changes.to_remove.items
+
         if not self.is_state_up():
             self.log.info('The OLT is not up, we cannot update flows',
                           flows_to_add=[f.id for f in flows_to_add],
                           flows_to_remove=[f.id for f in flows_to_remove])
             return
 
-        try:
-            self.flow_mgr.update_children_flows(device_rules_map)
-        except Exception as e:
-            self.log.error('Error updating children flows', error=e)
-
         self.log.debug('logical flows update', flows_to_add=flows_to_add,
                        flows_to_remove=flows_to_remove)
 
@@ -1001,7 +997,8 @@
             except Exception as e:
                 self.log.error('failed to remove flow', flow=flow, e=e)
 
-        self.flow_mgr.repush_all_different_flows()
+        # TODO NEW CORE: Core keeps track of logical flows. no need to keep track.  verify, especially olt reboot!
+        #self.flow_mgr.repush_all_different_flows()
 
     # There has to be a better way to do this
     def ip_hex(self, ip):
diff --git a/python/adapters/openolt/openolt_flow_mgr.py b/python/adapters/openolt/openolt_flow_mgr.py
index b05753f..af438d1 100644
--- a/python/adapters/openolt/openolt_flow_mgr.py
+++ b/python/adapters/openolt/openolt_flow_mgr.py
@@ -19,6 +19,7 @@
 from google.protobuf.json_format import MessageToDict
 import hashlib
 from simplejson import dumps
+from twisted.internet.defer import inlineCallbacks, returnValue
 
 from voltha_protos.openflow_13_pb2 import OFPXMC_OPENFLOW_BASIC, \
     ofp_flow_stats, OFPMT_OXM, Flows, FlowGroups, OFPXMT_OFB_IN_PORT, \
@@ -26,7 +27,8 @@
 from voltha_protos.device_pb2 import Port
 import pyvoltha.common.openflow.utils as fd
 from voltha_protos import openolt_pb2
-from pyvoltha.common.utils.registry import registry
+from voltha_protos.inter_container_pb2 import SwitchCapability, PortCapability, \
+    InterAdapterMessageType, InterAdapterOmciMessage
 
 from pyvoltha.common.tech_profile.tech_profile import DEFAULT_TECH_PROFILE_TABLE_ID
 
@@ -70,21 +72,16 @@
 
 class OpenOltFlowMgr(object):
 
-    def __init__(self, adapter_agent, log, stub, device_id, logical_device_id,
+    def __init__(self, core_proxy, adapter_proxy, log, stub, device_id, logical_device_id,
                  platform, resource_mgr):
-        self.adapter_agent = adapter_agent
+        self.core_proxy = core_proxy
+        self.adapter_proxy = adapter_proxy
         self.log = log
         self.stub = stub
         self.device_id = device_id
         self.logical_device_id = logical_device_id
         self.nni_intf_id = None
         self.platform = platform
-        #self.logical_flows_proxy = registry('core').get_proxy(
-        #    '/logical_devices/{}/flows'.format(self.logical_device_id))
-        self.logical_flows_proxy = adapter_agent
-	#self.flows_proxy = registry('core').get_proxy(
-        #    '/devices/{}/flows'.format(self.device_id))
-        #self.root_proxy = registry('core').get_proxy('/')
         self.resource_mgr = resource_mgr
         self.tech_profile = dict()
         self._populate_tech_profile_per_pon_port()
@@ -196,6 +193,7 @@
         self.log.debug('flow-ports', classifier_inport=classifier_info[IN_PORT], action_output=action_info[OUTPUT])
         (port_no, intf_id, onu_id, uni_id) = self.platform.extract_access_from_flow(
             classifier_info[IN_PORT], action_info[OUTPUT])
+        self.log.debug('extracted-flow-ports', port_no=port_no, intf_id=intf_id, onu_id=onu_id, uni_id=uni_id)
 
         self.divide_and_add_flow(intf_id, onu_id, uni_id, port_no, classifier_info,
                                  action_info, flow)
@@ -313,21 +311,7 @@
             self.log.debug('no device flow to remove for this flow (normal '
                            'for multi table flows)', flow=flow)
 
-    def _get_ofp_port_name(self, intf_id, onu_id, uni_id):
-        parent_port_no = self.platform.intf_id_to_port_no(intf_id, Port.PON_OLT)
-        child_device = self.adapter_agent.get_child_device(self.device_id,
-                                                           parent_port_no=parent_port_no, onu_id=onu_id)
-        if child_device is None:
-            self.log.error("could-not-find-child-device",
-                           parent_port_no=intf_id, onu_id=onu_id)
-            return (None, None)
-        ports = self.adapter_agent.get_ports(child_device.id, Port.ETHERNET_UNI)
-        logical_port = self.adapter_agent.get_logical_port(
-            self.logical_device_id, ports[uni_id].label)
-        ofp_port_name = (logical_port.ofp_port.name, logical_port.ofp_port.port_no)
-        return ofp_port_name
-
-    def get_tp_path(self, intf_id, ofp_port_name):
+    def get_tp_path(self, intf_id, uni):
         # FIXME Should get Table id form the flow, as of now hardcoded to
         # DEFAULT_TECH_PROFILE_TABLE_ID (64)
         # 'tp_path' contains the suffix part of the tech_profile_instance path.
@@ -335,7 +319,7 @@
         # TechProfile.KV_STORE_TECH_PROFILE_PATH_PREFIX by the ONU adapter.
         return self.tech_profile[intf_id]. \
             get_tp_path(DEFAULT_TECH_PROFILE_TABLE_ID,
-                        ofp_port_name)
+                        uni)
 
     def delete_tech_profile_instance(self, intf_id, onu_id, uni_id):
         # Remove the TP instance associated with the ONU
@@ -343,14 +327,17 @@
         tp_path = self.get_tp_path(intf_id, ofp_port_name)
         return self.tech_profile[intf_id].delete_tech_profile_instance(tp_path)
 
+    @inlineCallbacks
     def divide_and_add_flow(self, intf_id, onu_id, uni_id, port_no, classifier,
                             action, flow):
 
         self.log.debug('sorting flow', intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, port_no=port_no,
                        classifier=classifier, action=action)
 
+        uni = self.get_uni_port_path(intf_id, onu_id, uni_id)
+
         alloc_id, gem_ports = self.create_tcont_gemport(intf_id, onu_id, uni_id,
-                                                        flow.table_id)
+                                                        uni, port_no, flow.table_id)
         if alloc_id is None or gem_ports is None:
             self.log.error("alloc-id-gem-ports-unavailable", alloc_id=alloc_id,
                            gem_ports=gem_ports)
@@ -383,29 +370,30 @@
                         self.add_eapol_flow(
                             intf_id, onu_id, uni_id, port_no, flow, alloc_id, gemport_id,
                             vlan_id=vlan_id)
-                    parent_port_no = self.platform.intf_id_to_port_no(intf_id, Port.PON_OLT)
-                    onu_device = self.adapter_agent.get_child_device(self.device_id,
+                    parent_port_no = self.platform.intf_id_to_port_no(port_no, Port.PON_OLT)
+                    onu_device = yield self.core_proxy.get_child_device(self.device_id,
                                                                      onu_id=onu_id,
                                                                      parent_port_no=parent_port_no)
-                    (ofp_port_name, ofp_port_no) = self._get_ofp_port_name(intf_id, onu_id, uni_id)
-                    if ofp_port_name is None:
-                        self.log.error("port-name-not-found")
-                        return
-
-                    tp_path = self.get_tp_path(intf_id, ofp_port_name)
+                    tp_path = self.get_tp_path(intf_id, uni)
 
                     self.log.debug('Load-tech-profile-request-to-brcm-handler',
                                    tp_path=tp_path)
                     msg = {'proxy_address': onu_device.proxy_address, 'uni_id': uni_id,
                            'event': 'download_tech_profile', 'event_data': tp_path}
 
-                    # Send the event message to the ONU adapter
-                    self.adapter_agent.publish_inter_adapter_message(onu_device.id,
-                                                                     msg)
+                    # TODO NEW CORE: Create a new interadapter message type for tech profile startup for the onu
+                    # Send the tech profile event to the onu adapter
+                    #yield self.adapter_proxy.send_inter_adapter_message(
+                    #    msg=msg,
+                    #    type=InterAdapterMessageType.TECH_IND_REQUEST,
+                    #    from_adapter="openolt",
+                    #    to_adapter=onu_device.type,
+                    #    to_device_id=onu_device.id
+                    #)
 
                 if classifier[ETH_TYPE] == LLDP_ETH_TYPE:
                     self.log.debug('lldp flow add')
-                    nni_intf_id = self.get_nni_intf_id()
+                    nni_intf_id = yield self.get_nni_intf_id()
                     self.add_lldp_flow(flow, port_no, nni_intf_id)
 
             elif PUSH_VLAN in action:
@@ -419,7 +407,11 @@
                                classifier=classifier,
                                action=action, flow=flow)
 
-    def create_tcont_gemport(self, intf_id, onu_id, uni_id, table_id):
+    def get_uni_port_path(self, intf_id, onu_id, uni_id):
+        value = 'pon-{}/onu-{}/uni-{}'.format(intf_id, onu_id, uni_id)
+        return value
+
+    def create_tcont_gemport(self, intf_id, onu_id, uni_id, uni, port_no, table_id):
         alloc_id, gem_port_ids = None, None
         pon_intf_onu_id = (intf_id, onu_id)
 
@@ -432,30 +424,26 @@
             return alloc_id, gem_port_ids
 
         try:
-            (ofp_port_name, ofp_port_no) = self._get_ofp_port_name(intf_id, onu_id, uni_id)
-            if ofp_port_name is None:
-                self.log.error("port-name-not-found")
-                return alloc_id, gem_port_ids
             # FIXME: If table id is <= 63 using 64 as table id
             if table_id < DEFAULT_TECH_PROFILE_TABLE_ID:
                 table_id = DEFAULT_TECH_PROFILE_TABLE_ID
 
             # Check tech profile instance already exists for derived port name
             tech_profile_instance = self.tech_profile[intf_id]. \
-                get_tech_profile_instance(table_id, ofp_port_name)
+                get_tech_profile_instance(table_id, uni)
             self.log.debug('Get-tech-profile-instance-status', tech_profile_instance=tech_profile_instance)
 
             if tech_profile_instance is None:
                 # create tech profile instance
                 tech_profile_instance = self.tech_profile[intf_id]. \
-                    create_tech_profile_instance(table_id, ofp_port_name,
+                    create_tech_profile_instance(table_id, uni,
                                                  intf_id)
                 if tech_profile_instance is None:
                     raise Exception('Tech-profile-instance-creation-failed')
             else:
                 self.log.debug(
                     'Tech-profile-instance-already-exist-for-given port-name',
-                    ofp_port_name=ofp_port_name)
+                    table_id=table_id, intf_id=intf_id, uni=uni)
 
             # upstream scheduler
             us_scheduler = self.tech_profile[intf_id].get_us_scheduler(
@@ -471,7 +459,7 @@
             self.stub.CreateTconts(openolt_pb2.Tconts(intf_id=intf_id,
                                                       onu_id=onu_id,
                                                       uni_id=uni_id,
-                                                      port_no=ofp_port_no,
+                                                      port_no=port_no,
                                                       tconts=tconts))
 
             # Fetch alloc id and gemports from tech profile instance
@@ -529,12 +517,20 @@
                            downlink_action, DOWNSTREAM,
                            flow, alloc_id, gemport_id)
 
+    @inlineCallbacks
     def add_hsia_flow(self, intf_id, onu_id, uni_id, port_no, classifier, action,
                       direction, logical_flow, alloc_id, gemport_id):
 
+        self.log.debug('add hisa flow', flow=logical_flow, port_no=port_no,
+                       intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, gemport_id=gemport_id,
+                       alloc_id=alloc_id)
+
         flow_store_cookie = self._get_flow_store_cookie(classifier,
                                                         gemport_id)
 
+        self.log.debug('flow-store-cookie-classifer-action', flow_store_cookie=flow_store_cookie, classifier=classifier,
+                       action=action)
+
         # One of the OLT platform (Broadcom BAL) requires that symmetric
         # flows require the same flow_id to be used across UL and DL.
         # Since HSIA flow is the only symmetric flow currently, we need to
@@ -547,9 +543,14 @@
         if flow_id is None:
             self.log.error("hsia-flow-unavailable")
             return
+
+        self.log.debug('flow-id', flow_id=flow_id)
+
+        network_intf_id = yield self.get_nni_intf_id()
+
         flow = openolt_pb2.Flow(
             access_intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, flow_id=flow_id,
-            flow_type=direction, alloc_id=alloc_id, network_intf_id=self.get_nni_intf_id(),
+            flow_type=direction, alloc_id=alloc_id, network_intf_id=network_intf_id,
             gemport_id=gemport_id,
             classifier=self.mk_classifier(classifier),
             action=self.mk_action(action),
@@ -557,7 +558,10 @@
             port_no=port_no,
             cookie=logical_flow.cookie)
 
+        self.log.debug('openolt-agent-flow', hsia_flow=flow)
+
         if self.add_flow_to_device(flow, logical_flow):
+            self.log.debug('added-hsia-openolt-agent-flow', hsia_flow=flow, logical_flow=logical_flow)
             flow_info = self._get_flow_info_as_json_blob(flow,
                                                          flow_store_cookie,
                                                          HSIA_FLOW)
@@ -565,6 +569,7 @@
                                               flow.onu_id, flow.uni_id,
                                               flow.flow_id, flow_info)
 
+    @inlineCallbacks
     def add_dhcp_trap(self, intf_id, onu_id, uni_id, port_no, classifier, action, logical_flow,
                       alloc_id, gemport_id):
 
@@ -581,20 +586,31 @@
         flow_store_cookie = self._get_flow_store_cookie(classifier,
                                                         gemport_id)
 
+        self.log.debug('flow-store-cookie-classifer-action', flow_store_cookie=flow_store_cookie, classifier=classifier,
+                       action=action)
+
         flow_id = self.resource_mgr.get_flow_id(
             intf_id, onu_id, uni_id, flow_store_cookie
         )
+
+        self.log.debug('flow-id', flow_id=flow_id)
+
+        network_intf_id = yield self.get_nni_intf_id()
+
         dhcp_flow = openolt_pb2.Flow(
             onu_id=onu_id, uni_id=uni_id, flow_id=flow_id, flow_type=UPSTREAM,
             access_intf_id=intf_id, gemport_id=gemport_id,
-            alloc_id=alloc_id, network_intf_id=self.get_nni_intf_id(),
+            alloc_id=alloc_id, network_intf_id=network_intf_id,
             priority=logical_flow.priority,
             classifier=self.mk_classifier(classifier),
             action=self.mk_action(action),
             port_no=port_no,
             cookie=logical_flow.cookie)
 
+        self.log.debug('openolt-agent-flow', dhcp_flow=dhcp_flow)
+
         if self.add_flow_to_device(dhcp_flow, logical_flow):
+            self.log.debug('added-dhcp-openolt-agent-flow', dhcp_flow=dhcp_flow, logical_flow=logical_flow)
             flow_info = self._get_flow_info_as_json_blob(dhcp_flow, flow_store_cookie)
             self.update_flow_info_to_kv_store(dhcp_flow.access_intf_id,
                                               dhcp_flow.onu_id,
@@ -602,9 +618,14 @@
                                               dhcp_flow.flow_id,
                                               flow_info)
 
+    @inlineCallbacks
     def add_eapol_flow(self, intf_id, onu_id, uni_id, port_no, logical_flow, alloc_id,
                        gemport_id, vlan_id=DEFAULT_MGMT_VLAN):
 
+        self.log.debug('add eapol upstream trap', flow=logical_flow, port_no=port_no,
+                       intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, gemport_id=gemport_id,
+                       alloc_id=alloc_id, vlan_id=vlan_id)
+
         uplink_classifier = dict()
         uplink_classifier[ETH_TYPE] = EAP_ETH_TYPE
         uplink_classifier[PACKET_TAG_TYPE] = SINGLE_TAG
@@ -615,14 +636,22 @@
 
         flow_store_cookie = self._get_flow_store_cookie(uplink_classifier,
                                                         gemport_id)
+
+        self.log.debug('flow-store-cookie-classifier-action', flow_store_cookie=flow_store_cookie, uplink_classifier=uplink_classifier,
+                       uplink_action=uplink_action)
+
         # Add Upstream EAPOL Flow.
         uplink_flow_id = self.resource_mgr.get_flow_id(
             intf_id, onu_id, uni_id, flow_store_cookie
         )
 
+        self.log.debug('flow-id', uplink_flow_id=uplink_flow_id)
+
+        network_intf_id = yield self.get_nni_intf_id()
+
         upstream_flow = openolt_pb2.Flow(
             access_intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, flow_id=uplink_flow_id,
-            flow_type=UPSTREAM, alloc_id=alloc_id, network_intf_id=self.get_nni_intf_id(),
+            flow_type=UPSTREAM, alloc_id=alloc_id, network_intf_id=network_intf_id,
             gemport_id=gemport_id,
             classifier=self.mk_classifier(uplink_classifier),
             action=self.mk_action(uplink_action),
@@ -630,12 +659,15 @@
             port_no=port_no,
             cookie=logical_flow.cookie)
 
+        self.log.debug('openolt-agent-flow', upstream_flow=upstream_flow)
+
         logical_flow = copy.deepcopy(logical_flow)
         logical_flow.match.oxm_fields.extend(fd.mk_oxm_fields([fd.vlan_vid(
             vlan_id | 0x1000)]))
         logical_flow.match.type = OFPMT_OXM
 
         if self.add_flow_to_device(upstream_flow, logical_flow):
+            self.log.debug('added-eapol-openolt-agent-flow', upstream_flow=upstream_flow, logical_flow=logical_flow)
             flow_info = self._get_flow_info_as_json_blob(upstream_flow,
                                                          flow_store_cookie)
             self.update_flow_info_to_kv_store(upstream_flow.access_intf_id,
@@ -669,13 +701,18 @@
             flow_store_cookie = self._get_flow_store_cookie(downlink_classifier,
                                                             gemport_id)
 
+            self.log.debug('flow-store-cookie-classifier-action', flow_store_cookie=flow_store_cookie, downlink_classifier=downlink_classifier,
+                           downlink_action=downlink_action)
+
             downlink_flow_id = self.resource_mgr.get_flow_id(
                 intf_id, onu_id, uni_id, flow_store_cookie
             )
 
+            self.log.debug('flow-id', downlink_flow_id=downlink_flow_id)
+
             downstream_flow = openolt_pb2.Flow(
                 access_intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, flow_id=downlink_flow_id,
-                flow_type=DOWNSTREAM, alloc_id=alloc_id, network_intf_id=self.get_nni_intf_id(),
+                flow_type=DOWNSTREAM, alloc_id=alloc_id, network_intf_id=network_intf_id,
                 gemport_id=gemport_id,
                 classifier=self.mk_classifier(downlink_classifier),
                 action=self.mk_action(downlink_action),
@@ -683,6 +720,8 @@
                 port_no=port_no,
                 cookie=logical_flow.cookie)
 
+            self.log.debug('openolt-agent-flow', downstream_flow=downstream_flow)
+
             downstream_logical_flow = ofp_flow_stats(
                 id=logical_flow.id, cookie=logical_flow.cookie,
                 table_id=logical_flow.table_id, priority=logical_flow.priority,
@@ -698,6 +737,8 @@
                     self.platform.mk_uni_port_num(intf_id, onu_id, uni_id))]))
 
             if self.add_flow_to_device(downstream_flow, downstream_logical_flow):
+                self.log.debug('added-eapol-openolt-agent-flow', downstream_flow=downstream_flow,
+                               downstream_logical_flow=downstream_logical_flow)
                 flow_info = self._get_flow_info_as_json_blob(downstream_flow,
                                                              flow_store_cookie)
                 self.update_flow_info_to_kv_store(downstream_flow.access_intf_id,
@@ -727,6 +768,9 @@
 
     def add_lldp_flow(self, logical_flow, port_no, network_intf_id=0):
 
+        self.log.debug('add lldp trap flow', flow=logical_flow, port_no=port_no,
+                       network_intf_id=network_intf_id)
+
         classifier = dict()
         classifier[ETH_TYPE] = LLDP_ETH_TYPE
         classifier[PACKET_TAG_TYPE] = UNTAGGED
@@ -750,6 +794,9 @@
         flow_id = self.resource_mgr.get_flow_id(network_intf_id, onu_id, uni_id,
                                                 flow_store_cookie)
 
+        self.log.debug('flow-store-cookie-classifier-action', flow_store_cookie=flow_store_cookie, classifier=classifier,
+                       action=action)
+
         downstream_flow = openolt_pb2.Flow(
             access_intf_id=-1,  # access_intf_id not required
             onu_id=onu_id, # onu_id not required
@@ -764,9 +811,10 @@
             port_no=port_no,
             cookie=logical_flow.cookie)
 
-        self.log.debug('add lldp downstream trap', classifier=classifier,
-                       action=action, flow=downstream_flow, port_no=port_no)
+        self.log.debug('openolt-agent-flow', downstream_flow=downstream_flow)
+
         if self.add_flow_to_device(downstream_flow, logical_flow):
+            self.log.debug('added-lldp-openolt-agent-flow', downstream_flow=downstream_flow)
             self.update_flow_info_to_kv_store(network_intf_id, onu_id, uni_id,
                                               flow_id, downstream_flow)
 
@@ -881,10 +929,12 @@
                                grpc_error=grpc_e)
             return False
         else:
-            self.register_flow(logical_flow, flow)
+            # TODO NEW CORE: Should not need. Core keeps track of logical flows. no need to keep track.  verify, especially olt reboot!
+            # self.register_flow(logical_flow, flow)
             return True
 
     def update_flow_info_to_kv_store(self, intf_id, onu_id, uni_id, flow_id, flow):
+        self.log.debug("update-flow-info", intf_id=intf_id, onu_id=onu_id, uni_id=uni_id, flow_id=flow_id, flow=flow)
         self.resource_mgr.update_flow_id_info_for_uni(intf_id, onu_id, uni_id,
                                                       flow_id, flow)
 
@@ -1051,13 +1101,19 @@
             to_hash = dumps(classifier, sort_keys=True)
         return hashlib.md5(to_hash).hexdigest()[:12]
 
+    @inlineCallbacks
     def get_nni_intf_id(self):
         if self.nni_intf_id is not None:
-            return self.nni_intf_id
+            returnValue(self.nni_intf_id)
 
-        port_list = self.adapter_agent.get_ports(self.device_id, Port.ETHERNET_NNI)
-        logical_port = self.adapter_agent.get_logical_port(self.logical_device_id,
-                                                           port_list[0].label)
-        self.nni_intf_id = self.platform.intf_id_from_nni_port_num(logical_port.ofp_port.port_no)
-        self.log.debug("nni-intf-d ", nni_intf_id=self.nni_intf_id)
-        return self.nni_intf_id
+        port_list = yield self.core_proxy.get_ports(self.device_id, Port.ETHERNET_NNI)
+        self.log.debug("nni-ports-list", port_list=port_list)
+
+        # TODO: Hardcoded only first NNI
+        port = port_list.items[0]
+
+        self.log.debug("nni-port", port=port)
+        self.nni_intf_id = self.platform.intf_id_from_nni_port_num(port.port_no)
+
+        self.log.debug("nni-intf-d ", port=port.port_no, nni_intf_id=self.nni_intf_id)
+        returnValue(self.nni_intf_id)
diff --git a/python/adapters/openolt/openolt_resource_manager.py b/python/adapters/openolt/openolt_resource_manager.py
index 121f193..c0ccb14 100644
--- a/python/adapters/openolt/openolt_resource_manager.py
+++ b/python/adapters/openolt/openolt_resource_manager.py
@@ -143,12 +143,15 @@
     def get_flow_id(self, pon_intf_id, onu_id, uni_id, flow_store_cookie,
                     flow_category=None):
         pon_intf_onu_id = (pon_intf_id, onu_id, uni_id)
+        self.log.debug("get-flow-id", pon_intf_onu_id=pon_intf_onu_id)
         try:
             flow_ids = self.resource_mgrs[pon_intf_id]. \
                 get_current_flow_ids_for_onu(pon_intf_onu_id)
+            self.log.debug("get-current-flow-ids-for-onu", flow_ids=flow_ids)
             if flow_ids is not None:
                 for flow_id in flow_ids:
                     flows = self.get_flow_id_info(pon_intf_id, onu_id, uni_id, flow_id)
+                    self.log.debug("get-flow-id-info", flows=flows)
                     assert (isinstance(flows, list))
                     for flow in flows:
 
@@ -168,6 +171,7 @@
                 pon_intf_onu_id, flow_id
             )
 
+        self.log.debug("return-flow-id", flow_id=flow_id)
         return flow_id
 
     def get_flow_id_info(self, pon_intf_id, onu_id, uni_id, flow_id):