VOL-1476 Rename adapter_agent references to core_proxy

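Calls that previously went through the synchronous adapter_agent now go
through core_proxy, whose methods return Deferreds. Converted call sites
are therefore decorated with @inlineCallbacks and use yield/returnValue,
and a few calls move to the core_proxy method names (delete_port ->
port_removed, delete_child_device -> child_device_removed). A
representative conversion, shown as an illustrative excerpt from
openolt.py rather than a complete listing:

    from twisted.internet.defer import inlineCallbacks, returnValue

    @inlineCallbacks
    def reconcile_device(self, device):
        # core_proxy methods return Deferreds, so they must be yielded
        yield self.core_proxy.reconcile_child_devices(device.id)
        returnValue(device)
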
Change-Id: I5222907c3aedcd07e5c5a583ad1250e47b035ef3
diff --git a/python/adapters/openolt/openolt.py b/python/adapters/openolt/openolt.py
index 1e81c67..0c2035a 100644
--- a/python/adapters/openolt/openolt.py
+++ b/python/adapters/openolt/openolt.py
@@ -21,6 +21,7 @@
 
 from zope.interface import implementer
 from twisted.internet import reactor
+from twisted.internet.defer import inlineCallbacks, returnValue
 from pyvoltha.adapters.iadapter import IAdapterInterface
 from pyvoltha.common.utils.registry import registry
 from voltha_protos.common_pb2 import LogLevel
@@ -133,6 +134,9 @@
         else:
             self.num_devices += 1
 
+    # TODO: This is currently not used in VOLTHA 2.0.
+    # Reconcile needs to be rethought given the new architecture.
+    @inlineCallbacks
     def reconcile_device(self, device):
         log.info('reconcile-device', device=device)
         kwargs = {
@@ -157,8 +161,8 @@
             self.num_devices += 1
             # Invoke the children reconciliation which would setup the
             # basic children data structures
-            self.adapter_agent.reconcile_child_devices(device.id)
-            return device
+            yield self.core_proxy.reconcile_child_devices(device.id)
+            returnValue(device)
 
     def abandon_device(self, device):
         log.info('abandon-device', device=device)
diff --git a/python/adapters/openolt/openolt_alarms.py b/python/adapters/openolt/openolt_alarms.py
index 84bd7b8..dc00b6a 100644
--- a/python/adapters/openolt/openolt_alarms.py
+++ b/python/adapters/openolt/openolt_alarms.py
@@ -15,6 +15,8 @@
 #
 
 import arrow
+
+from twisted.internet.defer import inlineCallbacks, returnValue
 from pyvoltha.adapters.extensions.alarms.adapter_alarms import AdapterAlarms
 from pyvoltha.adapters.extensions.alarms.simulator.simulate_alarms import AdapterAlarmSimulator
 from pyvoltha.adapters.extensions.alarms.olt.olt_los_alarm import OltLosAlarm
@@ -35,17 +37,17 @@
 
 
 class OpenOltAlarmMgr(object):
-    def __init__(self, log, adapter_agent, device_id, logical_device_id,
+    def __init__(self, log, core_proxy, device_id, logical_device_id,
                  platform, serial_number):
         """
-        20180711 -  Addition of adapter_agent and device_id
+        20180711 - Addition of core_proxy and device_id
             to facilitate alarm processing and kafka posting
         :param log:
-        :param adapter_agent:
+        :param core_proxy:
         :param device_id:
         """
         self.log = log
-        self.adapter_agent = adapter_agent
+        self.core_proxy = core_proxy
         self.device_id = device_id
         self.logical_device_id = logical_device_id
         self.platform = platform
@@ -59,7 +61,7 @@
         self.enable_alarm_suppress = True
         self.alarm_suppress = {"olt_los_clear": 0, "onu_disc_raised": []}  # Keep count of alarms to limit.
         try:
-            self.alarms = AdapterAlarms(self.adapter_agent, self.device_id, self.logical_device_id, self.serial_number)
+            self.alarms = AdapterAlarms(self.core_proxy, self.device_id, self.logical_device_id, self.serial_number)
             self.simulator = AdapterAlarmSimulator(self.alarms)
         except Exception as initerr:
             self.log.exception("alarmhandler-init-error", errmsg=initerr.message)
@@ -462,6 +464,7 @@
             raise Exception(err)
         return onu_device_id, onu_serial_number
 
+    @inlineCallbacks
     def resolve_onu_id(self, onu_id, port_intf_id):
         """
         Resolve the onu_device from the intf_id value and port. Uses the adapter agent to
@@ -475,7 +478,7 @@
 
         try:
             onu_device = None
-            onu_device = self.adapter_agent.get_child_device(
+            onu_device = yield self.core_proxy.get_child_device(
                 self.device_id,
                 parent_port_no=self.platform.intf_id_to_port_no(
                     port_intf_id, device_pb2.Port.PON_OLT),
@@ -483,5 +486,5 @@
         except Exception as inner:
             self.log.exception('resolve-onu-id', errmsg=inner.message)
 
-        return onu_device
+        returnValue(onu_device)
 
diff --git a/python/adapters/openolt/openolt_device.py b/python/adapters/openolt/openolt_device.py
index 92a3521..2eaa445 100644
--- a/python/adapters/openolt/openolt_device.py
+++ b/python/adapters/openolt/openolt_device.py
@@ -123,14 +123,6 @@
             self.device.connect_status = ConnectStatus.UNREACHABLE
             self.device.oper_status = OperStatus.ACTIVATING
 
-        # If logical device does exist use it, else create one after connecting to device
-        if self.device.parent_id:
-            # logical device already exists
-            self.logical_device_id = self.device.parent_id
-            if is_reconciliation:
-                self.adapter_agent.reconcile_logical_device(
-                    self.logical_device_id)
-
         # Initialize the OLT state machine
         self.machine = Machine(model=self, states=OpenoltDevice.states,
                                transitions=OpenoltDevice.transitions,
@@ -571,11 +563,12 @@
         else:
             self.log.warn('Not-implemented-or-invalid-value-of-oper-state',
                           oper_state=onu_indication.oper_state)
-
+    @inlineCallbacks
     def onu_ports_down(self, onu_device, oper_state):
+        yield None  # placeholder: @inlineCallbacks requires the body to be a generator
         # Set port oper state to Discovered
         # add port will update port if it exists
-        # self.adapter_agent.add_port(
+        # yield self.core_proxy.add_port(
         #    self.device_id,
         #    Port(
         #        port_no=uni_no,
@@ -585,25 +578,6 @@
         #        oper_status=oper_state))
         # TODO this should be downning ports in onu adatper
 
-        # Disable logical port
-        onu_ports = self.proxy.get('devices/{}/ports'.format(onu_device.id))
-        for onu_port in onu_ports:
-            self.log.debug('onu-ports-down', onu_port=onu_port)
-            onu_port_id = onu_port.label
-            try:
-                onu_logical_port = self.adapter_agent.get_logical_port(
-                    logical_device_id=self.logical_device_id, port_id=onu_port_id)
-                onu_logical_port.ofp_port.state = OFPPS_LINK_DOWN
-                self.adapter_agent.update_logical_port(
-                    logical_device_id=self.logical_device_id,
-                    port=onu_logical_port)
-                self.log.debug('cascading-oper-state-to-port-and-logical-port')
-            except KeyError as e:
-                self.log.error('matching-onu-port-label-invalid',
-                               onu_id=onu_device.id, olt_id=self.device_id,
-                               onu_ports=onu_ports, onu_port_id=onu_port_id,
-                               error=e)
-
     @inlineCallbacks
     def omci_indication(self, omci_indication):
 
@@ -872,9 +846,9 @@
                 self.log.debug('delete-port',
                                onu_serial_number=child_serial_number,
                                port=port)
-                yield self.adapter_agent.delete_port(self.device_id, port)
+                yield self.core_proxy.port_removed(self.device_id, port)
                 return
-
+
     def update_flow_table(self, flow_changes):
 
         self.log.debug("update_flow_table", flow_changes=flow_changes)
@@ -907,7 +881,7 @@
 
         # TODO NEW CORE: Core keeps track of logical flows. no need to keep track.  verify, especially olt reboot!
         #self.flow_mgr.repush_all_different_flows()
-
+
     # There has to be a better way to do this
     def ip_hex(self, ip):
         octets = ip.split(".")
@@ -1000,11 +974,11 @@
                        onu_device=child_device,
                        onu_serial_number=child_device.serial_number)
         try:
-            yield self.adapter_agent.delete_child_device(self.device_id,
+            yield self.core_proxy.child_device_removed(self.device_id,
                                                    child_device.id,
                                                    child_device)
         except Exception as e:
-            self.log.error('adapter_agent error', error=e)
+            self.log.error('core_proxy error', error=e)
         try:
             self.delete_logical_port(child_device)
         except Exception as e:
diff --git a/python/adapters/openolt/openolt_flow_mgr.py b/python/adapters/openolt/openolt_flow_mgr.py
index a5dd2fc..264056a 100644
--- a/python/adapters/openolt/openolt_flow_mgr.py
+++ b/python/adapters/openolt/openolt_flow_mgr.py
@@ -196,19 +196,7 @@
         self.log.debug('extracted-flow-ports', port_no=port_no, intf_id=intf_id, onu_id=onu_id, uni_id=uni_id)
 
         self.divide_and_add_flow(intf_id, onu_id, uni_id, port_no, classifier_info,
-                                 action_info, flow)
-
-    def _is_uni_port(self, port_no):
-        try:
-            port = self.adapter_agent.get_logical_port(self.logical_device_id,
-                                                       'uni-{}'.format(port_no))
-            if port is not None:
-                return (not port.root_port), port.device_id
-            else:
-                return False, None
-        except Exception as e:
-            self.log.error("error-retrieving-port", e=e)
-            return False, None
+                                 action_info, flow)
 
     def _clear_flow_id_from_rm(self, flow, flow_id, flow_direction):
         uni_port_no = None
@@ -294,23 +282,6 @@
             self.log.debug('flow removed from device', flow=f,
                            flow_key=flow_to_remove)
 
-        if len(device_flows_to_remove) > 0:
-            new_flows = []
-            flows_ids_to_remove = [f.id for f in device_flows_to_remove]
-            for f in device_flows:
-                if f.id not in flows_ids_to_remove:
-                    new_flows.append(f)
-
-            self.flows_proxy.update('/', Flows(items=new_flows))
-            self.log.debug('flows removed from the data store',
-                           flow_ids_removed=flows_ids_to_remove,
-                           number_of_flows_removed=(len(device_flows) - len(
-                               new_flows)), expected_flows_removed=len(
-                    device_flows_to_remove))
-        else:
-            self.log.debug('no device flow to remove for this flow (normal '
-                           'for multi table flows)', flow=flow)
-
     def get_tp_path(self, intf_id, uni):
         # FIXME Should get Table id form the flow, as of now hardcoded to
         # DEFAULT_TECH_PROFILE_TABLE_ID (64)
diff --git a/python/adapters/openolt/openolt_statistics.py b/python/adapters/openolt/openolt_statistics.py
index 38e9615..5bade18 100644
--- a/python/adapters/openolt/openolt_statistics.py
+++ b/python/adapters/openolt/openolt_statistics.py
@@ -90,7 +90,7 @@
         #     # ONOS update
         #     self.update_logical_port_stats(port_stats)
         # # update port object stats
-        # port = self.device.adapter_agent.get_port(self.device.device_id,
+        # port = self.device.core_proxy.get_port(self.device.device_id,
         #     port_no=port_stats.intf_id)
         #
         # if port is None:
@@ -105,14 +105,14 @@
         # port.tx_errors = port_stats.tx_error_packets
         #
         # # Add port does an update if port exists
-        # self.device.adapter_agent.add_port(self.device.device_id, port)
+        # self.device.core_proxy.add_port(self.device.device_id, port)
 
     def flow_statistics_indication(self, flow_stats):
         self.log.info('flow-stats-collected', stats=flow_stats)
         # TODO: send to kafka ?
         # FIXME: etcd problem, do not update objects for now
         # # UNTESTED : the openolt driver does not yet provide flow stats
-        # self.device.adapter_agent.update_flow_stats(
+        # self.device.core_proxy.update_flow_stats(
         #       self.device.logical_device_id,
         #       flow_id=flow_stats.flow_id, packet_count=flow_stats.tx_packets,
         #       byte_count=flow_stats.tx_bytes)
@@ -180,35 +180,6 @@
         except Exception as err:
             self.log.exception("Error publishing kpi statistics. ", errmessage=err)
 
-    def update_logical_port_stats(self, port_stats):
-        try:
-            label = 'nni-{}'.format(port_stats.intf_id)
-            logical_port = self.device.adapter_agent.get_logical_port(
-                self.device.logical_device_id, label)
-        except KeyError as e:
-            self.log.warn('logical port was not found, it may not have been '
-                          'created yet', exception=e)
-            return
-
-        if logical_port is None:
-            self.log.error('logical-port-is-None',
-                logical_device_id=self.device.logical_device_id, label=label,
-                port_stats=port_stats)
-            return
-
-        logical_port.ofp_port_stats.rx_packets = port_stats.rx_packets
-        logical_port.ofp_port_stats.rx_bytes = port_stats.rx_bytes
-        logical_port.ofp_port_stats.tx_packets = port_stats.tx_packets
-        logical_port.ofp_port_stats.tx_bytes = port_stats.tx_bytes
-        logical_port.ofp_port_stats.rx_errors = port_stats.rx_error_packets
-        logical_port.ofp_port_stats.tx_errors = port_stats.tx_error_packets
-        logical_port.ofp_port_stats.rx_crc_err = port_stats.rx_crc_errors
-
-        self.log.debug('after-stats-update', port=logical_port)
-
-        self.device.adapter_agent.update_logical_port(
-            self.device.logical_device_id, logical_port)
-
     """
     The following 4 methods customer naming, the generation of the port objects, building of those
     objects and populating new data.   The pm metrics operate on the value that are contained in the Port objects.