VOL-3407: With multiple ONUs on a PON port, disable and re-enable of the
OLT sometimes does not work

- Clean up the old TP configuration on the ONU, if it still exists,
  before setting it up again.
- Track whether the ONU is ACTIVE and REACHABLE; when it is not, clear
  pending TCONT/GEM deletes from the local cache instead of attempting
  OMCI deletes that cannot reach the device (see the sketch below).
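
A simplified sketch of the new gating in the handler (abbreviated names;
the actual code below also tracks per-UNI/per-TP TpState):

    def handle_delete_gem_port(self, msg):
        gem_port = self._pon.get_gem_port(msg.gem_port_id)
        if self.is_device_active_and_reachable:
            # ONU is ACTIVE/REACHABLE: tear down the GEM port via OMCI.
            self.delete_tech_profile(uni_id=msg.uni_id, gem_port=gem_port,
                                     tp_path=msg.tp_path)
        else:
            # ONU unreachable (e.g. OLT disabled): OMCI cannot reach it,
            # so only clear local bookkeeping; the ONU side is cleaned up
            # when the tech profile is set up again after re-enable.
            self._clear_alloc_id_gem_port_from_internal_cache(
                None, gem_port.gem_id)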

Change-Id: If9ba7a52a88a975e8ecc9e52401b28be2217eadf
diff --git a/python/adapters/brcm_openomci_onu/brcm_openomci_onu_handler.py b/python/adapters/brcm_openomci_onu/brcm_openomci_onu_handler.py
index 40ecbb5..36cb2e1 100755
--- a/python/adapters/brcm_openomci_onu/brcm_openomci_onu_handler.py
+++ b/python/adapters/brcm_openomci_onu/brcm_openomci_onu_handler.py
@@ -88,6 +88,7 @@
         self.device_id = device_id
         self.proxy_address = None
         self._enabled = False
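+        # True only while the ONU has been reported ACTIVE and REACHABLE to
+        # the core; used to decide whether OMCI deletes can actually be sent
+        # to the device or only local state should be cleared.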
+        self._is_device_active_and_reachable = False
         self.events = None
         self._pm_metrics = None
         self._pm_metrics_started = False
@@ -168,6 +169,14 @@
     def uni_ports(self):
         return list(self._unis.values())
 
+    @property
+    def is_device_active_and_reachable(self):
+        return self._is_device_active_and_reachable
+
+    @is_device_active_and_reachable.setter
+    def is_device_active_and_reachable(self, value):
+        self._is_device_active_and_reachable = value
+
     def uni_port(self, port_no_or_name):
         if isinstance(port_no_or_name, six.string_types):
             return next((uni for uni in self.uni_ports
@@ -1422,9 +1431,16 @@
                 gem_port = self._pon.get_gem_port(del_gem_msg.gem_port_id)
                 self._tp_state_map_per_uni[uni_id][tp_id].queue_pending_delete_pon_resource(TpState.GEM_ID,
                                                                                             gem_port)
-                self.delete_tech_profile(uni_id=del_gem_msg.uni_id,
-                                         gem_port=gem_port,
-                                         tp_path=del_gem_msg.tp_path)
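+                # Send the OMCI delete only when the ONU is reachable; when it
+                # is not, OMCI cannot reach the device, so just clear the GEM
+                # port from local state.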
+                if self.is_device_active_and_reachable:
+                    self.delete_tech_profile(uni_id=del_gem_msg.uni_id,
+                                             gem_port=gem_port,
+                                             tp_path=del_gem_msg.tp_path)
+                else:
+                    self.log.debug("device-unreachable--clearing-gem-id-from-local-cache")
+                    if tp_id in self._tp_state_map_per_uni[uni_id]:
+                        self._tp_state_map_per_uni[uni_id][tp_id].pon_resource_delete_complete(TpState.GEM_ID,
+                                                                                               gem_port.gem_id)
+                    self._clear_alloc_id_gem_port_from_internal_cache(None, gem_port.gem_id)
 
             elif request.header.type == InterAdapterMessageType.DELETE_TCONT_REQUEST:
                 del_tcont_msg = InterAdapterDeleteTcontMessage()
@@ -1438,9 +1454,18 @@
                 tcont = self._pon.get_tcont(del_tcont_msg.alloc_id)
                 self._tp_state_map_per_uni[uni_id][tp_id].queue_pending_delete_pon_resource(TpState.ALLOC_ID,
                                                                                             tcont)
-                self.delete_tech_profile(uni_id=del_tcont_msg.uni_id,
-                                         tcont=tcont,
-                                         tp_path=del_tcont_msg.tp_path)
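+                # As above: attempt the OMCI delete only when the ONU is
+                # reachable; otherwise clear the TCONT locally and force TP
+                # setup to run again later.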
+                if self.is_device_active_and_reachable:
+                    self.delete_tech_profile(uni_id=del_tcont_msg.uni_id,
+                                             tcont=tcont,
+                                             tp_path=del_tcont_msg.tp_path)
+                else:
+                    self.log.debug("device-unreachable--clearing-tcont-from-local-cache")
+                    if tp_id in self._tp_state_map_per_uni[uni_id]:
+                        self._tp_state_map_per_uni[uni_id][tp_id].pon_resource_delete_complete(TpState.ALLOC_ID,
+                                                                                               tcont.alloc_id)
+                        self._tp_state_map_per_uni[uni_id][tp_id].tp_setup_done = False
+                    self._clear_alloc_id_gem_port_from_internal_cache(tcont.alloc_id, None)
+
             else:
                 self.log.error("inter-adapter-unhandled-type", request=request)
 
@@ -1516,12 +1541,14 @@
 
             # Let TP download happen again
             for uni_id in self._tp_state_map_per_uni:
-                self._tp_state_map_per_uni[uni_id].clear()
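+                # Keep the TpState entries themselves (they may still hold PON
+                # resources queued for deletion); just mark TP setup as not
+                # done so it runs again.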
+                for tp_id in self._tp_state_map_per_uni[uni_id]:
+                    self._tp_state_map_per_uni[uni_id][tp_id].tp_setup_done = False
 
             yield self.disable_ports(lock_ports=False)
             yield self.core_proxy.device_reason_update(self.device_id, "stopping-openomci")
             yield self.core_proxy.device_state_update(self.device_id, oper_status=OperStatus.DISCOVERED,
                                                       connect_status=ConnectStatus.UNREACHABLE)
+            self.is_device_active_and_reachable = False
         else:
             self.log.debug('not-changing-openomci-statemachine')
 
@@ -1532,6 +1559,7 @@
             yield self.disable_ports(lock_ports=True, device_disabled=True)
             yield self.core_proxy.device_reason_update(self.device_id, "omci-admin-lock")
             yield self.core_proxy.device_state_update(self.device_id, oper_status=OperStatus.UNKNOWN)
+            self.is_device_active_and_reachable = False
         except Exception as e:
             self.log.exception('exception-in-onu-disable', exception=e)
 
@@ -1542,6 +1570,7 @@
             yield self.core_proxy.device_state_update(device.id,
                                                       oper_status=OperStatus.ACTIVE,
                                                       connect_status=ConnectStatus.REACHABLE)
+            self.is_device_active_and_reachable = True
             yield self.core_proxy.device_reason_update(self.device_id, 'onu-reenabled')
             yield self.enable_ports()
         except Exception as e:
@@ -1704,6 +1733,7 @@
                 yield self.core_proxy.device_state_update(device.id,
                                                           oper_status=OperStatus.ACTIVE,
                                                           connect_status=ConnectStatus.REACHABLE)
+                self.is_device_active_and_reachable = True
                 yield self.enable_ports()
         else:
             self._download_mib(device)
@@ -1723,6 +1753,7 @@
             yield self.core_proxy.device_state_update(device.id,
                                                       oper_status=OperStatus.ACTIVE,
                                                       connect_status=ConnectStatus.REACHABLE)
+            self.is_device_active_and_reachable = True
             yield self.core_proxy.device_reason_update(self.device_id, 'initial-mib-downloaded')
             self._mib_download_task = None
             yield self.enable_ports()
diff --git a/python/adapters/brcm_openomci_onu/omci/brcm_tp_setup_task.py b/python/adapters/brcm_openomci_onu/omci/brcm_tp_setup_task.py
index 9e16b89..4642318 100644
--- a/python/adapters/brcm_openomci_onu/omci/brcm_tp_setup_task.py
+++ b/python/adapters/brcm_openomci_onu/omci/brcm_tp_setup_task.py
@@ -18,7 +18,8 @@
 from twisted.internet import reactor
 from twisted.internet.defer import inlineCallbacks, TimeoutError, failure
 from pyvoltha.adapters.extensions.omci.omci_me import Ont2G, OmciNullPointer, PriorityQueueFrame, \
-    Ieee8021pMapperServiceProfileFrame, MacBridgePortConfigurationDataFrame
+    Ieee8021pMapperServiceProfileFrame, MacBridgePortConfigurationDataFrame, GemInterworkingTpFrame, \
+    GemPortNetworkCtpFrame
 from pyvoltha.adapters.extensions.omci.tasks.task import Task
 from pyvoltha.adapters.extensions.omci.omci_defs import EntityOperations, ReasonCodes
 from pyvoltha.adapters.extensions.omci.omci_entities import OntG, Tcont, PriorityQueueG, Ieee8021pMapperServiceProfile, \
@@ -194,6 +195,11 @@
             if ieee_8021p_mapper_exists is False:
                 self.log.info("setting-up-8021pmapper-ani-mac-bridge-port")
                 yield self._setup__8021p_mapper__ani_mac_bridge_port()
+            else:
+                # If the IEEE8021pMapper ME already exists, its interworking-tp
+                # pointers reference stale gem-entity-ids. Delete the old ME and
+                # the GEM ports it points to, then recreate them from scratch.
+                self.log.info("delete-and-recreate-8021pmapper-ani-mac-bridge-port")
+                yield self._delete_and_recreate__8021p_mapper__ani_mac_bridge_port()
 
             tcont_idents = self._onu_device.query_mib(Tcont.class_id)
             self.log.debug('tcont-idents', tcont_idents=tcont_idents)
@@ -417,23 +423,6 @@
             #
 
             gem_entity_ids = [OmciNullPointer] * 8
-            # If IEEE8021pMapper ME existed already, then we need to re-build the
-            # inter-working-tp-pointers for different gem-entity-ids.
-            if ieee_8021p_mapper_exists:
-                self.log.debug("rebuilding-interworking-tp-pointers")
-                for k, v in ieee_8021p_mapper.items():
-                    if not isinstance(v, dict):
-                        continue
-                    # Check the entity-id of the instance matches what we expect
-                    # for this Uni/TechProfileId
-                    if k == (self._ieee_mapper_service_profile_entity_id +
-                             self._uni_port.mac_bridge_port_num + self._tp_table_id):
-                        for i in range(len(gem_entity_ids)):
-                            gem_entity_ids[i] = v.get('attributes', {}). \
-                                get('interwork_tp_pointer_for_p_bit_priority_' + str(i), OmciNullPointer)
-                        self.log.debug("interworking-tp-pointers-rebuilt-after-query-from-onu",
-                                       i_w_tp_ptr=gem_entity_ids)
-
             for gem_port in self._gem_ports:
                 self.log.debug("tp-gem-port", entity_id=gem_port.entity_id, uni_id=gem_port.uni_id)
 
@@ -538,3 +527,79 @@
         except Exception as e:
             self.log.exception('omci-setup-8021p-ani-port-setup', e=e)
             raise
+
+    @inlineCallbacks
+    def _delete_and_recreate__8021p_mapper__ani_mac_bridge_port(self):
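+        """
+        The existing 8021pMapper on this UNI/TP points at stale GEM entity
+        ids. Delete the GEM ports it references, then the mapper itself and
+        the ANI-side MAC bridge port, and recreate them via the normal
+        setup path.
+        """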
+
+        omci_cc = self._onu_device.omci_cc
+
+        try:
+
+            # First clean up the GEM ports referenced by the old 8021pMapper
+            ieee_8021p_mapper = self._onu_device.query_mib(Ieee8021pMapperServiceProfile.class_id)
+            for k, v in ieee_8021p_mapper.items():
+                if not isinstance(v, dict):
+                    continue
+                # Check the entity-id of the instance matches what we expect
+                # for this Uni/TechProfileId
+                if k == (self._ieee_mapper_service_profile_entity_id +
+                         self._uni_port.mac_bridge_port_num + self._tp_table_id):
+                    for i in range(8):
+                        gem_entity_id = v.get('attributes', {}). \
+                            get('interwork_tp_pointer_for_p_bit_priority_' + str(i), OmciNullPointer)
+                        if gem_entity_id != OmciNullPointer:
+                            self.log.debug('remove-from-hardware', gem_id=gem_entity_id)
+                            try:
+                                msg = GemInterworkingTpFrame(gem_entity_id)
+                                frame = msg.delete()
+                                self.log.debug('openomci-msg', omci_msg=msg)
+                                results = yield omci_cc.send(frame)
+                                self.check_status_and_state(results, 'delete-gem-interworking-tp')
+                            except Exception as e:
+                                self.log.exception('interworking-delete', e=e)
+                                raise
+
+                            try:
+                                msg = GemPortNetworkCtpFrame(gem_entity_id)
+                                frame = msg.delete()
+                                self.log.debug('openomci-msg', omci_msg=msg)
+                                results = yield omci_cc.send(frame)
+                                self.check_status_and_state(results, 'delete-gem-port-network-ctp')
+                            except Exception as e:
+                                self.log.exception('gemport-delete', e=e)
+                                raise
+                    break
+
+            # Then delete 8021pMapper ME
+            msg = Ieee8021pMapperServiceProfileFrame(
+                self._ieee_mapper_service_profile_entity_id +
+                self._uni_port.mac_bridge_port_num +
+                self._tp_table_id
+            )
+            frame = msg.delete()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci_cc.send(frame)
+            self.check_status_and_state(results, 'delete-8021p-mapper-service-profile')
+
+            # Then delete ANI Mac Bridge port
+            msg = MacBridgePortConfigurationDataFrame(
+                self._mac_bridge_port_ani_entity_id + self._uni_port.entity_id + self._tp_table_id  # Entity ID
+            )
+            frame = msg.delete()
+            self.log.debug('openomci-msg', omci_msg=msg)
+            results = yield omci_cc.send(frame)
+            self.check_status_and_state(results, 'delete-mac-bridge-port-configuration-data')
+
+            # TODO: We need not delete the TCONT, as TCONTs are pre-created; we
+            # should instead reset the TCONT alloc-id from its valid value back
+            # to 0xffff. This did not cause issues in testing, but a separate
+            # Jira is needed to track it.
+            yield self._setup__8021p_mapper__ani_mac_bridge_port()
+
+        except TimeoutError as e:
+            self.log.warn('rx-timeout-8021p-ani-port-delete', e=e)
+            raise
+
+        except Exception as e:
+            self.log.exception('omci-setup-8021p-ani-port-delete', e=e)
+            raise
+
diff --git a/python/adapters/brcm_openomci_onu/tp_state.py b/python/adapters/brcm_openomci_onu/tp_state.py
index b6820ea..672a814 100644
--- a/python/adapters/brcm_openomci_onu/tp_state.py
+++ b/python/adapters/brcm_openomci_onu/tp_state.py
@@ -62,6 +62,10 @@
     def is_tp_delete_pending(self, is_tp_delete_pending):
         self._is_tp_delete_pending = is_tp_delete_pending
 
+    @property
+    def pending_delete_pon_res_map(self):
+        return self._pending_delete_pon_res_map
+
     def queue_pending_delete_pon_resource(self, res_type, res):
         if res_type not in self._pending_delete_pon_res_map:
             if res_type == TpState.ALLOC_ID:
@@ -75,7 +79,7 @@
                 self.log.error("unknown-res-type", res_type=res_type)
         else:
             if res_type == TpState.ALLOC_ID:
-                self.log.warn("alloc-id-already-pending-for-deletion", alloc_id=res)
+                self.log.warn("alloc-id-already-pending-for-deletion", alloc_id=res.alloc_id)
             elif res_type == TpState.GEM_ID:
                 # Make sure that we are not adding duplicate gem-port-id to the list
                 for v in self._pending_delete_pon_res_map[TpState.GEM_ID]: