VOL-172: Support configuration of traffic descriptor profiles for upstream BW allocation to ONU

 * As an operator, I should be able to configure traffic descriptor profiles for upstream BW allocation (a sample profile sketch follows the notes below).
 * As an operator, I should be able to retrieve the configured traffic descriptor profiles.

Additional Notes:
 * xPON Handler and xPON Agent should be able to handle the traffic descriptor configuration.
 * This story has no impact on adapters until the traffic descriptor profile is referenced by a TCONT.
 * The traffic descriptor profiles should be saved to the database (in memory until the config/restore feature is ready).
 * The impact on HAProxy for load-balancing & distribution of workload is still TBD. For now, it can be assumed that profiles are available to all VOLTHA instances.
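
For illustration, here is a minimal sketch of a TDP create entry in the style
of the config table in test_voltha_xpon.py (see the diff below); the bandwidth
field names are assumptions based on the WT-385 traffic descriptor profile,
not confirmed proto fields:

    # hypothetical 'tdp-add' entry for the itest config table
    {'tdp-add': {
        'pb2': tdp.TrafficDescriptorProfileData(),
        'rpc': {
            "name": "TDP 1",
            "fixed_bandwidth": 100000,     # assumed WT-385 field names
            "assured_bandwidth": 500000,
            "maximum_bandwidth": 1000000
            }
        }
    }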

VOL-173: Support configuration of TCONTs as per WT-385 and auto-allocation of alloc-ids

 * As an operator, I should be able to provision a TCONT for the ONU with an existing traffic descriptor profile
 * As an operator, I should be able to retrieve the provisioned TCONT
 * As an operator, I should be able to change the traffic descriptor profile for a TCONT

Additional Notes:

 * An alloc-id should be auto-allocated for the TCONT (see the pool sketch after this list)
 * A generic IAdapter interface is to be provided for use by the OLT and ONU adapters for TCONT/alloc-id/BW configuration
 * In the interest of schedule/timing, the first step (e.g. POC-3 & trial) assumes a fixed queueing model, to be detailed (i.e. no configuration of the queueing model)
 * The concerned ONU should receive upstream grants once a TCONT is provisioned for it
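
Alloc-ids are drawn from a pool maintained per channel group, as in the
xpon_handler changes below. A minimal sketch with a simplified stand-in for
VOLTHA's IndexPool (the stand-in only mirrors the get_next/release/
pre_allocate behaviour the handler relies on; it is not the real
implementation):

    # simplified stand-in for voltha's IndexPool (illustration only)
    class SimpleIndexPool(object):
        def __init__(self, max_entries, offset):
            self.offset = offset
            self.free = set(range(max_entries))  # indices still available

        def get_next(self):                      # returns None when exhausted
            if not self.free:
                return None
            index = min(self.free)
            self.free.remove(index)
            return index + self.offset

        def pre_allocate(self, ids):             # reserve ids restored from the db
            for _id in ids:
                self.free.discard(_id - self.offset)

        def release(self, _id):                  # hand an id back on delete/failure
            self.free.add(_id - self.offset)

    # per-CG pools with the same ranges as in xpon_handler below:
    # alloc-ids start at 1024, gemport-ids at 1021
    pools = {'alloc_id': SimpleIndexPool(16383, 1024),
             'gemport_id': SimpleIndexPool(64500, 1021)}
    alloc_id = pools['alloc_id'].get_next()  # assigned to the new TCONT
    pools['alloc_id'].release(alloc_id)      # given back if the add fails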

VOL-174: Support configuration of GEMs as per WT-385 and auto-allocation of gemport-ids

 * As an operator, I should be able to provision a GEMPORT object for the ONU and assign it to a UNI
 * As an operator, I should be able to retrieve the provisioned GEMPORT

Additional Notes:

 * gemport-ids should be auto-allocated for the GEMPORT object (see the reference-walk sketch after this list)
 * A generic IAdapter interface is to be provided for use by the OLT and ONU adapters for GEM port configuration
 * In the interest of schedule/timing, the first step (e.g. POC-3 & trial) assumes a fixed queueing model, to be detailed (i.e. no configuration of the queueing model)
 * The concerned OLT and ONU should be configured with the allocated gemport-ids
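
The gemport-id is drawn from the pool of the channel group that ultimately
serves the UNI. A flattened sketch of the reference walk, which the handler
below implements recursively as extract_channel_group_from_request
(channel_group_for_gemport is a hypothetical name for illustration):

    # walk GEMPORT -> VEnet -> VOntani -> ChannelPartition -> ChannelGroup
    def channel_group_for_gemport(root, cg_dict, gem):
        venet = root.get('/v_enets/{}'.format(gem.itf_ref))
        vontani = root.get('/v_ont_anis/{}'.format(venet.data.v_ontani_ref))
        cpart = root.get('/channel_partitions/{}'.format(vontani.data.parent_ref))
        cg = root.get('/channel_groups/{}'.format(cpart.data.channelgroup_ref))
        return cg_dict[cg.name]['gemport_id'].get_next()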

Change-Id: I5f4f4d61959604f0bb2565c3bbb0e4f33f4be2d3
diff --git a/tests/itests/voltha/test_voltha_xpon.py b/tests/itests/voltha/test_voltha_xpon.py
index 9136e67..2413457 100644
--- a/tests/itests/voltha/test_voltha_xpon.py
+++ b/tests/itests/voltha/test_voltha_xpon.py
@@ -180,63 +180,6 @@
             "tcont_ref": "TCont 1",
             }
         }
-    },
-    {'cg-mod': {
-        'pb2': fb.ChannelgroupConfig(),
-        'rpc': {
-            "interface": {
-                "enabled": True,
-                "name": "Manhattan",
-                "description": "Channel Group for Manhattan"
-                },
-            "data": {
-                "polling_period": 100,
-                "system_id": "000000",
-                "raman_mitigation": "RAMAN_NONE"
-                },
-            "name": "Manhattan"
-            }
-        }
-    },
-    {'gemport-del': {
-        'pb2': gemport.GemportsConfigData(),
-        'rpc': {"name": "GEMPORT 1"}}
-    },
-    {'tcont-del': {
-        'pb2': tcont.TcontsConfigData(),
-        'rpc': {"name": "TCont 1"}}
-    },
-    {'tdp-del': {
-        'pb2': tdp.TrafficDescriptorProfileData(),
-        'rpc': {"name": "TDP 1"}}
-    },
-    {'venet-del': {
-        'pb2': fb.VEnetConfig(),
-        'rpc': {"name": "Enet UNI 1"}}
-    },
-    {'ontani-del': {
-        'pb2': fb.OntaniConfig(),
-        'rpc': {"name": "Golden User"}}
-    },
-    {'vontani-del': {
-        'pb2': fb.VOntaniConfig(),
-        'rpc': {"name": "Golden User"}}
-    },
-    {'cterm-del': {
-        'pb2': fb.ChannelterminationConfig(),
-        'rpc': {"name": "PON port"}}
-    },
-    {'cpair-del': {
-        'pb2': fb.ChannelpairConfig(),
-        'rpc': {"name": "PON port"}}
-    },
-    {'cpart-del': {
-        'pb2': fb.ChannelpartitionConfig(),
-        'rpc': {"name": "Freedom Tower"}}
-    },
-    {'cg-del': {
-        'pb2': fb.ChannelgroupConfig(),
-        'rpc': {"name": "Manhattan"}}
     }
 ]
 
@@ -264,7 +207,7 @@
         self.verify_device_preprovisioned_state(device['id'])
         self.activate_device(device['id'])
 
-    def test_999_remove_device(self):
+    def _remove_device(self):
         self.deactivate_device(device['id'])
         self.delete_device(device['id'])
 
@@ -348,7 +291,14 @@
         dict1 = MessageToDict(req,
                               including_default_value_fields = True,
                               preserving_proto_field_name = True)
+        # skip comparison of READ-ONLY fields
         result['id'] = ''
+        if isinstance(req, fb.ChannelgroupConfig):
+            result['cg_index'] = 0
+        elif isinstance(req, tcont.TcontsConfigData):
+            result['alloc_id'] = 0
+        elif isinstance(req, gemport.GemportsConfigData):
+            result['gemport_id'] = 0
         return dict1 == result
 
 
diff --git a/voltha/core/xpon_handler.py b/voltha/core/xpon_handler.py
index 18b6062..71949c3 100644
--- a/voltha/core/xpon_handler.py
+++ b/voltha/core/xpon_handler.py
@@ -60,11 +60,47 @@
             investigated wrt persistency & HA design evolution, for a better
             approach in future.
         '''
-        self.cg_pool = IndexPool(2**12, 0)
+        self.cg_pool = IndexPool(2**12, 1)
+        self.cg_dict = {}
 
     def start(self, root):
         log.debug('starting xpon_handler')
         self.root = root
+        self.reinitialize_cg_ids()
+        self.reinitialize_tcont_and_gemport_ids()
+
+    def reinitialize_cg_ids(self):
+        cg_tup = ()
+        channel_groups = self.root.get('/channel_groups')
+        for cg in channel_groups:
+            cg_tup += (cg.cg_index, )
+            '''
+            Pools for handling alloc-ids and gemport-ids
+            @TODO: As per current persistency & HA design, each VOLTHA instance
+                maintains a separate independent database. Since channel-groups
+                broadcast to all the VOLTHA instances in the cluster, the
+                xpon_handler in each instance will independently try to
+                allocate a unique index. This approach works OK for XGS-PON
+                since CG<->CTerm relationship is 1:1 for XGS-PON(Since a device
+                can only be served by one VOLTHA instance and thereby CTerm).
+                This needs to be further investigated wrt persistency & HA
+                design evolution, for a better approach in future.
+            '''
+            self.cg_dict[cg.name] = {'alloc_id': IndexPool(16383, 1024)}
+            self.cg_dict[cg.name].update({'gemport_id': IndexPool(64500, 1021)})
+        self.cg_pool.pre_allocate(cg_tup)
+
+    def reinitialize_tcont_and_gemport_ids(self):
+        tconts = self.root.get('/tconts')
+        for tc in tconts:
+            cg_name = self.extract_channel_group_from_request(tc,
+                        'v_ont_anis', tc.interface_reference)
+            self.cg_dict[cg_name]['alloc_id'].pre_allocate((tc.alloc_id, ))
+        gemports = self.root.get('/gemports')
+        for gm in gemports:
+            cg_name = self.extract_channel_group_from_request(gm,
+                        'v_enets', gm.itf_ref)
+            self.cg_dict[cg_name]['gemport_id'].pre_allocate((gm.gemport_id, ))
 
     def get_all_channel_group_config(self, request, context):
         log.info('grpc-request', request=request)
@@ -89,6 +125,8 @@
             assert _id != None
             request.cg_index = _id
             self.root.add('/channel_groups', request)
+            self.cg_dict[request.name] = {'alloc_id': IndexPool(16383, 1024)}
+            self.cg_dict[request.name].update({'gemport_id': IndexPool(64500, 1021)})
 
             return Empty()
         except AssertionError, e:
@@ -97,6 +135,7 @@
             context.set_code(StatusCode.INVALID_ARGUMENT)
             return Empty()
         except ValueError:
+            self.cg_pool.release(_id)
             context.set_details(
                 'Duplicated channel group \'{}\' cannot be created'.format(
                     request.name))
@@ -846,9 +885,12 @@
         try:
             assert isinstance(request, TcontsConfigData)
             assert self.validate_interface(request, context)
-            '''
-            @TODO: Allocation of Alloc-ID
-            '''
+
+            cg_name = self.extract_channel_group_from_request(request,
+                        'v_ont_anis', request.interface_reference)
+            _id = self.cg_dict[cg_name]['alloc_id'].get_next()
+            assert _id != None
+            request.alloc_id = _id
             log.debug('creating-tcont', name=request.name)
             self.root.add('/tconts', request)
             return Empty()
@@ -860,6 +902,7 @@
             context.set_code(StatusCode.NOT_FOUND)
             return Empty()
         except ValueError:
+            self.cg_dict[cg_name]['alloc_id'].release(_id)
             context.set_details(
                 'Duplicated tcont \'{}\' cannot be created'.format(
                     request.name))
@@ -878,6 +921,8 @@
             assert self.validate_interface(request, context)
 
             path = '/tconts/{}'.format(request.name)
+            tcont = self.root.get(path)
+            request.alloc_id = tcont.alloc_id
             log.debug('updating-tcont', name=request.name)
             self.root.update(path, request, strict=True)
             return Empty()
@@ -906,8 +951,12 @@
                 'The Tcont -- \'{}\' is referenced by GemPort'.format(
                     request.name)
             path = '/tconts/{}'.format(request.name)
+            tcont = self.root.get(path)
+            cg_name = self.extract_channel_group_from_request(tcont,
+                        'v_ont_anis', tcont.interface_reference)
             log.debug('removing-tcont', name=request.name)
             self.root.remove(path)
+            self.cg_dict[cg_name]['alloc_id'].release(tcont.alloc_id)
             return Empty()
         except AssertionError, e:
             context.set_details(e.message)
@@ -929,9 +978,11 @@
         try:
             assert isinstance(request, GemportsConfigData)
             assert self.validate_interface(request, context)
-            '''
-            @TODO: Allocation of Gemport-ID
-            '''
+            cg_name = self.extract_channel_group_from_request(request,
+                        'v_enets', request.itf_ref)
+            _id = self.cg_dict[cg_name]['gemport_id'].get_next()
+            assert _id != None
+            request.gemport_id = _id
             log.debug('creating-gemport', name=request.name)
             self.root.add('/gemports', request)
             return Empty()
@@ -943,6 +994,7 @@
             context.set_code(StatusCode.NOT_FOUND)
             return Empty()
         except ValueError:
+            self.cg_dict[cg_name]['gemport_id'].release(_id)
             context.set_details(
                 'Duplicated gemport \'{}\' cannot be created'.format(
                     request.name))
@@ -961,6 +1013,8 @@
             assert self.validate_interface(request, context)
 
             path = '/gemports/{}'.format(request.name)
+            gemport = self.root.get(path)
+            request.gemport_id = gemport.gemport_id
             log.debug('updating-gemport', name=request.name)
             self.root.update(path, request, strict=True)
             return Empty()
@@ -982,8 +1036,12 @@
         try:
             assert isinstance(request, GemportsConfigData)
             path = '/gemports/{}'.format(request.name)
+            gemport = self.root.get(path)
+            cg_name = self.extract_channel_group_from_request(gemport,
+                        'v_enets', gemport.itf_ref)
             log.debug('removing-gemport', name=request.name)
             self.root.remove(path)
+            self.cg_dict[cg_name]['gemport_id'].release(gemport.gemport_id)
             return Empty()
         except AssertionError:
             context.set_details('Instance is not of gemport')
@@ -1220,3 +1278,23 @@
             log.info('reference-for-{}-not-found-\'{}\''\
                      .format(interface, reference))
             return False
+
+    def extract_channel_group_from_request(self, request, interface,
+                                     reference):
+        try:
+            path = '/{}/{}'.format(interface, reference)
+            item = self.root.get(path)
+            if isinstance(item, ChannelgroupConfig):
+                return item.name
+            elif isinstance(item, VEnetConfig):
+                return self.extract_channel_group_from_request(Empty(),
+                            'v_ont_anis', item.data.v_ontani_ref)
+            elif isinstance(item, VOntaniConfig):
+                return self.extract_channel_group_from_request(Empty(),
+                            'channel_partitions', item.data.parent_ref)
+            elif isinstance(item, ChannelpartitionConfig):
+                return self.extract_channel_group_from_request(Empty(),
+                            'channel_groups', item.data.channelgroup_ref)
+        except KeyError:
+        log.info('reference-for-{}-not-found'.format(interface))
+            return Empty()