VOL-1221: OpenOLT Adapter/Driver will use a Technology Profile Instance to create the OLT Upstream and Downstream Queuing and Scheduling Constructs for a Bidirectional Flow.
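
As context for reviewers, below is a rough usage sketch (not part of this patch) of how the
adapter side could drive the new constructs. The helper name, its arguments and the metadata
dict are illustrative assumptions only; the TechProfile / PONResourceManager calls it uses are
the ones added by this change.

    from common.tech_profile.tech_profile import TechProfile

    # Assumes `rmgr` is an already-constructed PONResourceManager for the PON
    # technology; this change attaches a TechProfile helper to it as rmgr.tech_profile.
    def setup_queues_for_uni(rmgr, table_id, uni_port_name, intf_id, pon_intf_onu_id):
        # Reuse an existing tech profile instance if one is in the KV store,
        # otherwise create one (which also allocates the alloc id and gemport
        # ids through the resource manager).
        tp_instance = rmgr.tech_profile.get_tech_profile_instance(
            table_id, uni_port_name)
        if tp_instance is None:
            tp_instance = rmgr.tech_profile.create_tech_profile_instance(
                table_id, uni_port_name, intf_id)

        # Translate the instance into openolt scheduler/tcont messages for the
        # upstream and downstream directions of the bidirectional flow.
        us_sched = TechProfile.get_us_scheduler(tp_instance)
        ds_sched = TechProfile.get_ds_scheduler(tp_instance)
        tconts = TechProfile.get_tconts(tp_instance, us_sched, ds_sched)

        # Allocate a flow id from the new FLOW_ID pool and record it, together
        # with adapter-specific metadata, against the (pon_intf_id, onu_id).
        flow_id = rmgr.get_resource_id(intf_id, 'FLOW_ID')
        rmgr.update_flow_id_for_onu(pon_intf_onu_id, flow_id)
        rmgr.update_flow_id_info_for_onu(pon_intf_onu_id, flow_id,
                                         {'num_tconts': len(tconts)})
        return tconts, flow_id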

Change-Id: Iaf1a782529e2c459c586b158bd4f6447f548e004
diff --git a/common/pon_resource_manager/resource_manager.py b/common/pon_resource_manager/resource_manager.py
index aa1b6ca..9e249fb 100644
--- a/common/pon_resource_manager/resource_manager.py
+++ b/common/pon_resource_manager/resource_manager.py
@@ -21,13 +21,14 @@
 uses a KV store in backend to ensure resiliency of the data.
 """
 import json
+import ast
 import structlog
 from bitstring import BitArray
-from ast import literal_eval
 import shlex
 from argparse import ArgumentParser, ArgumentError
 
 from common.pon_resource_manager.resource_kv_store import ResourceKvStore
+from common.tech_profile.tech_profile import TechProfile
 
 
 # Used to parse extra arguments to OpenOlt adapter from the NBI
@@ -45,6 +46,7 @@
     ONU_ID = 'ONU_ID'
     ALLOC_ID = 'ALLOC_ID'
     GEMPORT_ID = 'GEMPORT_ID'
+    FLOW_ID = 'FLOW_ID'
 
     # Constants for passing command line arugments
     OLT_MODEL_ARG = '--olt_model'
@@ -63,6 +65,8 @@
         "alloc_id_end": 2816,
         "gemport_id_start": 1024,
         "gemport_id_end": 8960,
+        "flow_id_start": 1,
+        "flow_id_end": 16383,
         "pon_ports": 16
     }
 
@@ -78,6 +82,9 @@
     GEMPORT_ID_START_IDX = "gemport_id_start"
     GEMPORT_ID_END_IDX = "gemport_id_end"
     GEMPORT_ID_SHARED_IDX = "gemport_id_shared"
+    FLOW_ID_START_IDX = "flow_id_start"
+    FLOW_ID_END_IDX = "flow_id_end"
+    FLOW_ID_SHARED_IDX = "flow_id_shared"
     NUM_OF_PON_PORT = "pon_ports"
 
     # PON Resource range configuration on the KV store.
@@ -90,6 +97,7 @@
     ALLOC_ID_POOL_PATH = '{}/alloc_id_pool/{}'
     GEMPORT_ID_POOL_PATH = '{}/gemport_id_pool/{}'
     ONU_ID_POOL_PATH = '{}/onu_id_pool/{}'
+    FLOW_ID_POOL_PATH = '{}/flow_id_pool/{}'
 
     # Path on the KV store for storing list of alloc IDs for a given ONU
     # Format: <device_id>/<(pon_intf_id, onu_id)>/alloc_ids
@@ -99,6 +107,14 @@
     # Format: <device_id>/<(pon_intf_id, onu_id)>/gemport_ids
     GEMPORT_ID_RESOURCE_MAP_PATH = '{}/{}/gemport_ids'
 
+    # Path on the KV store for storing list of Flow IDs for a given ONU
+    # Format: <device_id>/<(pon_intf_id, onu_id)>/flow_ids
+    FLOW_ID_RESOURCE_MAP_PATH = '{}/{}/flow_ids'
+
+    # Flow Id info: Used to store more metadata associated with the flow_id
+    # Format: <device_id>/<(pon_intf_id, onu_id)>/flow_id_info/<flow_id>
+    FLOW_ID_INFO_PATH = '{}/{}/flow_id_info/{}'
+
     # Constants for internal usage.
     PON_INTF_ID = 'pon_intf_id'
     START_IDX = 'start_idx'
@@ -133,6 +149,7 @@
 
             self._kv_store = ResourceKvStore(technology, device_id, backend,
                                              host, port)
+            self.tech_profile = TechProfile(self)
 
             # Below attribute, pon_resource_ranges, should be initialized
             # by reading from KV store.
@@ -140,16 +157,19 @@
             self.pon_resource_ranges[PONResourceManager.ONU_ID_SHARED_IDX] = None
             self.pon_resource_ranges[PONResourceManager.ALLOC_ID_SHARED_IDX] = None
             self.pon_resource_ranges[PONResourceManager.GEMPORT_ID_SHARED_IDX] = None
+            self.pon_resource_ranges[PONResourceManager.FLOW_ID_SHARED_IDX] = None
 
             self.shared_resource_mgrs = dict()
             self.shared_resource_mgrs[PONResourceManager.ONU_ID_SHARED_IDX] = None
             self.shared_resource_mgrs[PONResourceManager.ALLOC_ID_SHARED_IDX] = None
             self.shared_resource_mgrs[PONResourceManager.GEMPORT_ID_SHARED_IDX] = None
+            self.shared_resource_mgrs[PONResourceManager.FLOW_ID_SHARED_IDX] = None
 
             self.shared_idx_by_type = dict()
             self.shared_idx_by_type[PONResourceManager.ONU_ID] = PONResourceManager.ONU_ID_SHARED_IDX
             self.shared_idx_by_type[PONResourceManager.ALLOC_ID] = PONResourceManager.ALLOC_ID_SHARED_IDX
             self.shared_idx_by_type[PONResourceManager.GEMPORT_ID] = PONResourceManager.GEMPORT_ID_SHARED_IDX
+            self.shared_idx_by_type[PONResourceManager.FLOW_ID] = PONResourceManager.FLOW_ID_SHARED_IDX
 
             self.intf_ids = None
 
@@ -193,19 +213,18 @@
                                 e=e)
         return False
 
-
     def update_range_(self, start_idx, start, end_idx, end, shared_idx, shared_pool_id, shared_resource_mgr):
         if (start is not None) and \
-           (start_idx not in self.pon_resource_ranges or self.pon_resource_ranges[start_idx] < start):
-              self.pon_resource_ranges[start_idx] = start
+                (start_idx not in self.pon_resource_ranges or self.pon_resource_ranges[start_idx] < start):
+            self.pon_resource_ranges[start_idx] = start
         if (end is not None) and \
-           (end_idx not in self.pon_resource_ranges or self.pon_resource_ranges[end_idx] > end):
-              self.pon_resource_ranges[end_idx] = end
+                (end_idx not in self.pon_resource_ranges or self.pon_resource_ranges[end_idx] > end):
+            self.pon_resource_ranges[end_idx] = end
         if (shared_pool_id is not None) and \
-           (shared_idx not in self.pon_resource_ranges or self.pon_resource_ranges[shared_idx] is None):
+                (shared_idx not in self.pon_resource_ranges or self.pon_resource_ranges[shared_idx] is None):
             self.pon_resource_ranges[shared_idx] = shared_pool_id
         if (shared_resource_mgr is not None) and \
-           (shared_idx not in self.shared_resource_mgrs or self.shared_resource_mgrs[shared_idx] is None):
+                (shared_idx not in self.shared_resource_mgrs or self.shared_resource_mgrs[shared_idx] is None):
             self.shared_resource_mgrs[shared_idx] = shared_resource_mgr
 
     def update_ranges(self,
@@ -220,22 +239,31 @@
                       gemport_id_start_idx=None,
                       gemport_id_end_idx=None,
                       gemport_id_shared_pool_id=None,
-                      gemport_id_shared_resource_mgr=None):
+                      gemport_id_shared_resource_mgr=None,
+                      flow_id_start_idx=None,
+                      flow_id_end_idx=None,
+                      flow_id_shared_pool_id=None,
+                      flow_id_shared_resource_mgr=None):
 
         self.update_range_(PONResourceManager.ONU_ID_START_IDX, onu_id_start_idx,
-                          PONResourceManager.ONU_ID_END_IDX, onu_id_end_idx,
-                          PONResourceManager.ONU_ID_SHARED_IDX, onu_id_shared_pool_id,
-                          onu_id_shared_resource_mgr)
+                           PONResourceManager.ONU_ID_END_IDX, onu_id_end_idx,
+                           PONResourceManager.ONU_ID_SHARED_IDX, onu_id_shared_pool_id,
+                           onu_id_shared_resource_mgr)
 
         self.update_range_(PONResourceManager.ALLOC_ID_START_IDX, alloc_id_start_idx,
-                          PONResourceManager.ALLOC_ID_END_IDX, alloc_id_end_idx,
-                          PONResourceManager.ALLOC_ID_SHARED_IDX, alloc_id_shared_pool_id,
-                          alloc_id_shared_resource_mgr)
+                           PONResourceManager.ALLOC_ID_END_IDX, alloc_id_end_idx,
+                           PONResourceManager.ALLOC_ID_SHARED_IDX, alloc_id_shared_pool_id,
+                           alloc_id_shared_resource_mgr)
 
         self.update_range_(PONResourceManager.GEMPORT_ID_START_IDX, gemport_id_start_idx,
-                          PONResourceManager.GEMPORT_ID_END_IDX, gemport_id_end_idx,
-                          PONResourceManager.GEMPORT_ID_SHARED_IDX, gemport_id_shared_pool_id,
-                          gemport_id_shared_resource_mgr)
+                           PONResourceManager.GEMPORT_ID_END_IDX, gemport_id_end_idx,
+                           PONResourceManager.GEMPORT_ID_SHARED_IDX, gemport_id_shared_pool_id,
+                           gemport_id_shared_resource_mgr)
+
+        self.update_range_(PONResourceManager.FLOW_ID_START_IDX, flow_id_start_idx,
+                           PONResourceManager.FLOW_ID_END_IDX, flow_id_end_idx,
+                           PONResourceManager.FLOW_ID_SHARED_IDX, flow_id_shared_pool_id,
+                           flow_id_shared_resource_mgr)
 
     def init_default_pon_resource_ranges(self,
                                          onu_id_start_idx=1,
@@ -247,6 +275,9 @@
                                          gemport_id_start_idx=1024,
                                          gemport_id_end_idx=8960,
                                          gemport_id_shared_pool_id=None,
+                                         flow_id_start_idx=1,
+                                         flow_id_end_idx=16383,
+                                         flow_id_shared_pool_id=None,
                                          num_of_pon_ports=16,
                                          intf_ids=None):
         """
@@ -261,6 +292,9 @@
         :param gemport_id_start_idx: gemport id start index
         :param gemport_id_end_idx: gemport id end index
         :param gemport_id_shared_pool_id: pool idx for gemport id shared by all intfs or None for no sharing
+        :param flow_id_start_idx: flow id start index
+        :param flow_id_end_idx: flow id end index
+        :param flow_id_shared_pool_id: pool idx for flow id shared by all intfs or None for no sharing
         :param num_of_pon_ports: number of PON ports
         :param intf_ids: interfaces serviced by this manager
         """
@@ -268,7 +302,8 @@
 
         self.update_ranges(onu_id_start_idx, onu_id_end_idx, onu_id_shared_pool_id, None,
                            alloc_id_start_idx, alloc_id_end_idx, alloc_id_shared_pool_id, None,
-                           gemport_id_start_idx, gemport_id_end_idx, gemport_id_shared_pool_id, None)
+                           gemport_id_start_idx, gemport_id_end_idx, gemport_id_shared_pool_id, None,
+                           flow_id_start_idx, flow_id_end_idx, flow_id_shared_pool_id, None)
 
         if intf_ids is None:
             intf_ids = range(0, num_of_pon_ports)
@@ -281,11 +316,12 @@
         """
 
         self._log.info("init-device-resource-pool", technology=self.technology,
-            pon_resource_ranges=self.pon_resource_ranges)
+                       pon_resource_ranges=self.pon_resource_ranges)
 
         for i in self.intf_ids:
             shared_pool_id = self.pon_resource_ranges[PONResourceManager.ONU_ID_SHARED_IDX]
-            if shared_pool_id is not None: i = shared_pool_id
+            if shared_pool_id is not None:
+                i = shared_pool_id
             self.init_resource_id_pool(
                 pon_intf_id=i,
                 resource_type=PONResourceManager.ONU_ID,
@@ -293,11 +329,13 @@
                     PONResourceManager.ONU_ID_START_IDX],
                 end_idx=self.pon_resource_ranges[
                     PONResourceManager.ONU_ID_END_IDX])
-            if shared_pool_id is not None: break
+            if shared_pool_id is not None:
+                break
 
         for i in self.intf_ids:
             shared_pool_id = self.pon_resource_ranges[PONResourceManager.ALLOC_ID_SHARED_IDX]
-            if shared_pool_id is not None: i = shared_pool_id
+            if shared_pool_id is not None:
+                i = shared_pool_id
             self.init_resource_id_pool(
                 pon_intf_id=i,
                 resource_type=PONResourceManager.ALLOC_ID,
@@ -305,11 +343,13 @@
                     PONResourceManager.ALLOC_ID_START_IDX],
                 end_idx=self.pon_resource_ranges[
                     PONResourceManager.ALLOC_ID_END_IDX])
-            if shared_pool_id is not None: break
+            if shared_pool_id is not None:
+                break
 
         for i in self.intf_ids:
             shared_pool_id = self.pon_resource_ranges[PONResourceManager.GEMPORT_ID_SHARED_IDX]
-            if shared_pool_id is not None: i = shared_pool_id
+            if shared_pool_id is not None:
+                i = shared_pool_id
             self.init_resource_id_pool(
                 pon_intf_id=i,
                 resource_type=PONResourceManager.GEMPORT_ID,
@@ -317,7 +357,22 @@
                     PONResourceManager.GEMPORT_ID_START_IDX],
                 end_idx=self.pon_resource_ranges[
                     PONResourceManager.GEMPORT_ID_END_IDX])
-            if shared_pool_id is not None: break
+            if shared_pool_id is not None:
+                break
+
+        for i in self.intf_ids:
+            shared_pool_id = self.pon_resource_ranges[PONResourceManager.FLOW_ID_SHARED_IDX]
+            if shared_pool_id is not None:
+                i = shared_pool_id
+            self.init_resource_id_pool(
+                pon_intf_id=i,
+                resource_type=PONResourceManager.FLOW_ID,
+                start_idx=self.pon_resource_ranges[
+                    PONResourceManager.FLOW_ID_START_IDX],
+                end_idx=self.pon_resource_ranges[
+                    PONResourceManager.FLOW_ID_END_IDX])
+            if shared_pool_id is not None:
+                break
 
     def clear_device_resource_pool(self):
         """
@@ -325,30 +380,47 @@
         """
         for i in self.intf_ids:
             shared_pool_id = self.pon_resource_ranges[PONResourceManager.ONU_ID_SHARED_IDX]
-            if shared_pool_id is not None: i = shared_pool_id
+            if shared_pool_id is not None:
+                i = shared_pool_id
             self.clear_resource_id_pool(
                 pon_intf_id=i,
                 resource_type=PONResourceManager.ONU_ID,
             )
-            if shared_pool_id is not None: break
+            if shared_pool_id is not None:
+                break
 
         for i in self.intf_ids:
             shared_pool_id = self.pon_resource_ranges[PONResourceManager.ALLOC_ID_SHARED_IDX]
-            if shared_pool_id is not None: i = shared_pool_id
+            if shared_pool_id is not None:
+                i = shared_pool_id
             self.clear_resource_id_pool(
                 pon_intf_id=i,
                 resource_type=PONResourceManager.ALLOC_ID,
             )
-            if shared_pool_id is not None: break
+            if shared_pool_id is not None:
+                break
 
         for i in self.intf_ids:
             shared_pool_id = self.pon_resource_ranges[PONResourceManager.GEMPORT_ID_SHARED_IDX]
-            if shared_pool_id is not None: i = shared_pool_id
+            if shared_pool_id is not None:
+                i = shared_pool_id
             self.clear_resource_id_pool(
                 pon_intf_id=i,
                 resource_type=PONResourceManager.GEMPORT_ID,
             )
-            if shared_pool_id is not None: break
+            if shared_pool_id is not None:
+                break
+
+        for i in self.intf_ids:
+            shared_pool_id = self.pon_resource_ranges[PONResourceManager.FLOW_ID_SHARED_IDX]
+            if shared_pool_id is not None:
+                i = shared_pool_id
+            self.clear_resource_id_pool(
+                pon_intf_id=i,
+                resource_type=PONResourceManager.FLOW_ID,
+            )
+            if shared_pool_id is not None:
+                break
 
     def init_resource_id_pool(self, pon_intf_id, resource_type, start_idx,
                               end_idx):
@@ -367,7 +439,7 @@
         shared_resource_mgr = self.shared_resource_mgrs[self.shared_idx_by_type[resource_type]]
         if shared_resource_mgr is not None and shared_resource_mgr is not self:
             return shared_resource_mgr.init_resource_id_pool(pon_intf_id, resource_type,
-                start_idx, end_idx)
+                                                             start_idx, end_idx)
 
         path = self._get_path(pon_intf_id, resource_type)
         if path is None:
@@ -398,7 +470,7 @@
 
     def get_resource_id(self, pon_intf_id, resource_type, num_of_id=1):
         """
-        Create alloc/gemport/onu id for given OLT PON interface.
+        Create alloc/gemport/onu/flow id for given OLT PON interface.
 
         :param pon_intf_id: OLT PON interface id
         :param resource_type: String to identify type of resource
@@ -409,6 +481,10 @@
         """
         result = None
 
+        if num_of_id < 1:
+            self._log.error("invalid-num-of-resources-requested")
+            return result
+
         # delegate to the master instance if sharing enabled across instances
         shared_resource_mgr = self.shared_resource_mgrs[self.shared_idx_by_type[resource_type]]
         if shared_resource_mgr is not None and shared_resource_mgr is not self:
@@ -420,16 +496,20 @@
 
         try:
             resource = self._get_resource(path)
-            if resource is not None and resource_type == \
-                    PONResourceManager.ONU_ID:
+            if resource is not None and \
+                    (resource_type == PONResourceManager.ONU_ID or
+                     resource_type == PONResourceManager.FLOW_ID):
                 result = self._generate_next_id(resource)
             elif resource is not None and (
                     resource_type == PONResourceManager.GEMPORT_ID or
                     resource_type == PONResourceManager.ALLOC_ID):
-                result = list()
-                while num_of_id > 0:
-                    result.append(self._generate_next_id(resource))
-                    num_of_id -= 1
+                if num_of_id == 1:
+                    result = self._generate_next_id(resource)
+                else:
+                    result = list()
+                    while num_of_id > 0:
+                        result.append(self._generate_next_id(resource))
+                        num_of_id -= 1
             else:
                 raise Exception("get-resource-failed")
 
@@ -445,7 +525,7 @@
 
     def free_resource_id(self, pon_intf_id, resource_type, release_content):
         """
-        Release alloc/gemport/onu id for given OLT PON interface.
+        Release alloc/gemport/onu/flow id for given OLT PON interface.
 
         :param pon_intf_id: OLT PON interface id
         :param resource_type: String to identify type of resource
@@ -466,8 +546,9 @@
 
         try:
             resource = self._get_resource(path)
-            if resource is not None and resource_type == \
-                    PONResourceManager.ONU_ID:
+            if resource is not None and (
+                    resource_type == PONResourceManager.ONU_ID or
+                    resource_type == PONResourceManager.FLOW_ID):
                 self._release_id(resource, release_content)
             elif resource is not None and (
                     resource_type == PONResourceManager.ALLOC_ID or
@@ -564,6 +645,8 @@
         Get currently configured alloc ids for given pon_intf_onu_id
 
         :param pon_intf_onu_id: reference of PON interface id and onu id
+
+        :return list: List of alloc_ids if available, else None
         """
         path = PONResourceManager.ALLOC_ID_RESOURCE_MAP_PATH.format(
             self.device_id,
@@ -581,6 +664,8 @@
         Get currently configured gemport ids for given pon_intf_onu_id
 
         :param pon_intf_onu_id: reference of PON interface id and onu id
+
+        :return list: List of gemport IDs if available, else None
         """
 
         path = PONResourceManager.GEMPORT_ID_RESOURCE_MAP_PATH.format(
@@ -594,11 +679,68 @@
 
         return None
 
+    def get_current_flow_ids_for_onu(self, pon_intf_onu_id):
+        """
+        Get currently configured flow ids for given pon_intf_onu_id
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+
+        :return list: List of Flow IDs if available, else None
+        """
+
+        path = PONResourceManager.FLOW_ID_RESOURCE_MAP_PATH.format(
+            self.device_id,
+            str(pon_intf_onu_id))
+        value = self._kv_store.get_from_kv_store(path)
+        if value is not None:
+            flow_id_list = json.loads(value)
+            assert(isinstance(flow_id_list, list))
+            if len(flow_id_list) > 0:
+                return flow_id_list
+
+        return None
+
+    def get_flow_id_info(self, pon_intf_onu_id, flow_id):
+        """
+        Get flow_id details configured for the ONU.
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        :param flow_id: Flow Id reference
+
+        :return blob: Flow data blob if available, else None
+        """
+
+        path = PONResourceManager.FLOW_ID_INFO_PATH.format(
+            self.device_id,
+            str(pon_intf_onu_id),
+            flow_id)
+        value = self._kv_store.get_from_kv_store(path)
+        if value is not None:
+            return ast.literal_eval(value)
+
+        return None
+
+    def remove_flow_id_info(self, pon_intf_onu_id, flow_id):
+        """
+        Remove flow_id details configured for the ONU.
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        :param flow_id: Flow Id reference
+
+        """
+
+        path = PONResourceManager.FLOW_ID_INFO_PATH.format(
+            self.device_id,
+            str(pon_intf_onu_id),
+            flow_id)
+        self._kv_store.remove_from_kv_store(path)
+
     def update_alloc_ids_for_onu(self, pon_intf_onu_id, alloc_ids):
         """
         Update currently configured alloc ids for given pon_intf_onu_id
 
         :param pon_intf_onu_id: reference of PON interface id and onu id
+        :param alloc_ids: list of alloc ids
         """
         path = PONResourceManager.ALLOC_ID_RESOURCE_MAP_PATH.format(
             self.device_id, str(pon_intf_onu_id)
@@ -612,6 +754,7 @@
         Update currently configured gemport ids for given pon_intf_onu_id
 
         :param pon_intf_onu_id: reference of PON interface id and onu id
+        :param gemport_ids: list of gem port ids
         """
         path = PONResourceManager.GEMPORT_ID_RESOURCE_MAP_PATH.format(
             self.device_id, str(pon_intf_onu_id)
@@ -620,6 +763,48 @@
             path, json.dumps(gemport_ids)
         )
 
+    def update_flow_id_for_onu(self, pon_intf_onu_id, flow_id, add=True):
+        """
+        Update the flow_id list of the ONU (add or remove flow_id from the list)
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        :param flow_id: flow ID
+        :param add: Boolean flag to indicate whether the flow_id should be
+                    added or removed from the list. Defaults to adding the flow.
+        """
+        path = PONResourceManager.FLOW_ID_RESOURCE_MAP_PATH.format(
+            self.device_id, str(pon_intf_onu_id)
+        )
+        current_flow_ids = self.get_current_flow_ids_for_onu(pon_intf_onu_id)
+        if not isinstance(current_flow_ids, list):
+            # When the first flow_id is being added, the current_flow_ids is None
+            current_flow_ids = list()
+
+        if add:
+            if flow_id not in current_flow_ids:
+                current_flow_ids.append(flow_id)
+        else:
+            if flow_id in current_flow_ids:
+                current_flow_ids.remove(flow_id)
+
+        self._kv_store.update_to_kv_store(path, json.dumps(current_flow_ids))
+
+    def update_flow_id_info_for_onu(self, pon_intf_onu_id, flow_id, flow_data):
+        """
+        Update any metadata associated with the flow_id. The flow_data could be
+        JSON or any other data structure; the resource manager does not interpret it.
+
+        :param pon_intf_onu_id: reference of PON interface id and onu id
+        :param flow_id: Flow ID
+        :param flow_data: Flow data blob
+        """
+        path = PONResourceManager.FLOW_ID_INFO_PATH.format(
+            self.device_id, str(pon_intf_onu_id), flow_id
+        )
+
+        if not self._kv_store.update_to_kv_store(path, flow_data):
+            self._log.error("flow-info-update-failed", path=path, flow_id=flow_id)
+
     def _get_olt_model(self):
         """
         Get olt model variant
@@ -673,7 +858,8 @@
         """
 
         shared_pool_id = self.pon_resource_ranges[self.shared_idx_by_type[resource_type]]
-        if shared_pool_id is not None: pon_intf_id = shared_pool_id
+        if shared_pool_id is not None:
+            pon_intf_id = shared_pool_id
 
         path = None
         if resource_type == PONResourceManager.ONU_ID:
@@ -682,10 +868,22 @@
             path = self._get_alloc_id_resource_path(pon_intf_id)
         elif resource_type == PONResourceManager.GEMPORT_ID:
             path = self._get_gemport_id_resource_path(pon_intf_id)
+        elif resource_type == PONResourceManager.FLOW_ID:
+            path = self._get_flow_id_resource_path(pon_intf_id)
         else:
             self._log.error("invalid-resource-pool-identifier")
         return path
 
+    def _get_flow_id_resource_path(self, pon_intf_id):
+        """
+        Get flow id resource path.
+
+        :param pon_intf_id: OLT PON interface id
+        :return: flow id resource path
+        """
+        return PONResourceManager.FLOW_ID_POOL_PATH.format(
+            self.device_id, pon_intf_id)
+
     def _get_alloc_id_resource_path(self, pon_intf_id):
         """
         Get alloc id resource path.
diff --git a/common/tech_profile/__init__.py b/common/tech_profile/__init__.py
new file mode 100644
index 0000000..b0fb0b2
--- /dev/null
+++ b/common/tech_profile/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2017-present Open Networking Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
diff --git a/common/tech_profile/tech_profile.py b/common/tech_profile/tech_profile.py
new file mode 100644
index 0000000..c3a9993
--- /dev/null
+++ b/common/tech_profile/tech_profile.py
@@ -0,0 +1,587 @@
+#
+# Copyright 2018 the original author or authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import json
+import ast
+from collections import namedtuple
+import structlog
+from enum import Enum
+
+from voltha.core.config.config_backend import ConsulStore
+from voltha.core.config.config_backend import EtcdStore
+from voltha.registry import registry
+from voltha.adapters.openolt.protos import openolt_pb2
+
+
+# logger
+log = structlog.get_logger()
+
+DEFAULT_TECH_PROFILE_TABLE_ID = 64
+
+# Enums used while creating TechProfileInstance
+Direction = Enum('Direction', ['UPSTREAM', 'DOWNSTREAM', 'BIDIRECTIONAL'],
+                 start=0)
+SchedulingPolicy = Enum('SchedulingPolicy',
+                        ['WRR', 'StrictPriority', 'Hybrid'], start=0)
+AdditionalBW = Enum('AdditionalBW', ['None', 'NA', 'BestEffort', 'Auto'],
+                    start=0)
+DiscardPolicy = Enum('DiscardPolicy',
+                     ['TailDrop', 'WTailDrop', 'RED', 'WRED'], start=0)
+InferredAdditionBWIndication = Enum('InferredAdditionBWIndication',
+                                    ['None', 'NoneAssured', 'BestEffort'],
+                                    start=0)
+
+
+class InstanceControl(object):
+    # Default value constants
+    ONU_DEFAULT_INSTANCE = 'multi-instance'
+    UNI_DEFAULT_INSTANCE = 'single-instance'
+    DEFAULT_NUM_GEM_PORTS = 1
+    DEFAULT_GEM_PAYLOAD_SIZE = 'auto'
+
+    def __init__(self, onu=ONU_DEFAULT_INSTANCE,
+                 uni=UNI_DEFAULT_INSTANCE,
+                 num_gem_ports=DEFAULT_NUM_GEM_PORTS,
+                 max_gem_payload_size=DEFAULT_GEM_PAYLOAD_SIZE):
+        self.onu = onu
+        self.uni = uni
+        self.num_gem_ports = num_gem_ports
+        self.max_gem_payload_size = max_gem_payload_size
+
+
+class Scheduler(object):
+    # Default value constants
+    DEFAULT_ADDITIONAL_BW = 'auto'
+    DEFAULT_PRIORITY = 0
+    DEFAULT_WEIGHT = 0
+    DEFAULT_Q_SCHED_POLICY = 'hybrid'
+
+    def __init__(self, direction, additional_bw=DEFAULT_ADDITIONAL_BW,
+                 priority=DEFAULT_PRIORITY,
+                 weight=DEFAULT_WEIGHT,
+                 q_sched_policy=DEFAULT_Q_SCHED_POLICY):
+        self.direction = direction
+        self.additional_bw = additional_bw
+        self.priority = priority
+        self.weight = weight
+        self.q_sched_policy = q_sched_policy
+
+
+class GemPortAttribute(object):
+    # Default value constants
+    DEFAULT_AES_ENCRYPTION = 'True'
+    DEFAULT_PRIORITY_Q = 0
+    DEFAULT_WEIGHT = 0
+    DEFAULT_MAX_Q_SIZE = 'auto'
+    DEFAULT_DISCARD_POLICY = DiscardPolicy.TailDrop.name
+
+    def __init__(self, pbit_map, discard_config,
+                 aes_encryption=DEFAULT_AES_ENCRYPTION,
+                 scheduling_policy=SchedulingPolicy.WRR.name,
+                 priority_q=DEFAULT_PRIORITY_Q,
+                 weight=DEFAULT_WEIGHT,
+                 max_q_size=DEFAULT_MAX_Q_SIZE,
+                 discard_policy=DiscardPolicy.TailDrop.name):
+        self.max_q_size = max_q_size
+        self.pbit_map = pbit_map
+        self.aes_encryption = aes_encryption
+        self.scheduling_policy = scheduling_policy
+        self.priority_q = priority_q
+        self.weight = weight
+        self.discard_policy = discard_policy
+        self.discard_config = discard_config
+
+
+class DiscardConfig(object):
+    # Default value constants
+    DEFAULT_MIN_THRESHOLD = 0
+    DEFAULT_MAX_THRESHOLD = 0
+    DEFAULT_MAX_PROBABILITY = 0
+
+    def __init__(self, min_threshold=DEFAULT_MIN_THRESHOLD,
+                 max_threshold=DEFAULT_MAX_THRESHOLD,
+                 max_probability=DEFAULT_MAX_PROBABILITY):
+        self.min_threshold = min_threshold
+        self.max_threshold = max_threshold
+        self.max_probability = max_probability
+
+
+class TechProfile(object):
+    # Constants used in default tech profile
+    DEFAULT_TECH_PROFILE_NAME = 'Default_1tcont_1gem_Profile'
+    DEFAULT_VERSION = 1.0
+    DEFAULT_GEMPORTS_COUNT = 1
+    pbits = ['0b11111111']
+
+    # Tech profile path prefix in kv store
+    KV_STORE_TECH_PROFILE_PATH_PREFIX = 'voltha/technology_profiles'
+
+    # Tech profile path in kv store
+    TECH_PROFILE_PATH = '{}/{}'  # <technology>/<table_id>
+
+    # Tech profile instance path in kv store
+    # Format: <technology>/<table_id>/<uni_port_name>
+    TECH_PROFILE_INSTANCE_PATH = '{}/{}/{}'
+
+    # Tech-Profile JSON String Keys
+    NAME = 'name'
+    PROFILE_TYPE = 'profile_type'
+    VERSION = 'version'
+    NUM_GEM_PORTS = 'num_gem_ports'
+    INSTANCE_CONTROL = 'instance_control'
+    US_SCHEDULER = 'us_scheduler'
+    DS_SCHEDULER = 'ds_scheduler'
+    UPSTREAM_GEM_PORT_ATTRIBUTE_LIST = 'upstream_gem_port_attribute_list'
+    DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST = 'downstream_gem_port_attribute_list'
+    ONU = 'onu'
+    UNI = 'uni'
+    MAX_GEM_PAYLOAD_SIZE = 'max_gem_payload_size'
+    DIRECTION = 'direction'
+    ADDITIONAL_BW = 'additional_bw'
+    PRIORITY = 'priority'
+    Q_SCHED_POLICY = 'q_sched_policy'
+    WEIGHT = 'weight'
+    PBIT_MAP = 'pbit_map'
+    DISCARD_CONFIG = 'discard_config'
+    MAX_THRESHOLD = 'max_threshold'
+    MIN_THRESHOLD = 'min_threshold'
+    MAX_PROBABILITY = 'max_probability'
+    DISCARD_POLICY = 'discard_policy'
+    PRIORITY_Q = 'priority_q'
+    SCHEDULING_POLICY = 'scheduling_policy'
+    MAX_Q_SIZE = 'max_q_size'
+    AES_ENCRYPTION = 'aes_encryption'
+
+    def __init__(self, resource_mgr):
+        try:
+            self.args = registry('main').get_args()
+            self.resource_mgr = resource_mgr
+
+            if self.args.backend == 'etcd':
+                # KV store's IP Address and PORT
+                host, port = self.args.etcd.split(':', 1)
+                self._kv_store = EtcdStore(
+                    host, port, TechProfile.
+                        KV_STORE_TECH_PROFILE_PATH_PREFIX)
+            elif self.args.backend == 'consul':
+                # KV store's IP Address and PORT
+                host, port = self.args.consul.split(':', 1)
+                self._kv_store = ConsulStore(
+                    host, port, TechProfile.
+                        KV_STORE_TECH_PROFILE_PATH_PREFIX)
+
+            # self.tech_profile_instance_store = dict()
+        except Exception as e:
+            log.exception("exception-in-init")
+            raise Exception(e)
+
+    class DefaultTechProfile(object):
+        def __init__(self, name, **kwargs):
+            self.name = name
+            self.profile_type = kwargs[TechProfile.PROFILE_TYPE]
+            self.version = kwargs[TechProfile.VERSION]
+            self.num_gem_ports = kwargs[TechProfile.NUM_GEM_PORTS]
+            self.instance_control = kwargs[TechProfile.INSTANCE_CONTROL]
+            self.us_scheduler = kwargs[TechProfile.US_SCHEDULER]
+            self.ds_scheduler = kwargs[TechProfile.DS_SCHEDULER]
+            self.upstream_gem_port_attribute_list = kwargs[
+                TechProfile.UPSTREAM_GEM_PORT_ATTRIBUTE_LIST]
+            self.downstream_gem_port_attribute_list = kwargs[
+                TechProfile.DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST]
+
+        def to_json(self):
+            return json.dumps(self, default=lambda o: o.__dict__,
+                              indent=4)
+
+    def get_tp_path(self, table_id, uni_port_name):
+        return TechProfile.TECH_PROFILE_INSTANCE_PATH.format(
+            self.resource_mgr.technology, table_id, uni_port_name)
+
+    def create_tech_profile_instance(self, table_id, uni_port_name, intf_id):
+        tech_profile_instance = None
+        try:
+            # Get tech profile from kv store
+            tech_profile = self._get_tech_profile_from_kv_store(table_id)
+            path = self.get_tp_path(table_id, uni_port_name)
+
+            if tech_profile is not None:
+                tech_profile = self._get_tech_profile(tech_profile)
+                log.debug(
+                    "Created-tech-profile-instance-with-values-from-kvstore")
+            else:
+                tech_profile = self._default_tech_profile()
+                log.debug(
+                    "Created-tech-profile-instance-with-default-values")
+
+            tech_profile_instance = TechProfileInstance(
+                uni_port_name, tech_profile, self.resource_mgr, intf_id)
+            self._add_tech_profile_instance(path,
+                                            tech_profile_instance.to_json())
+        except Exception as e:
+            log.exception("Create-tech-profile-instance-failed", exception=e)
+
+        return tech_profile_instance
+
+    def get_tech_profile_instance(self, table_id, uni_port_name):
+        # path to fetch tech profile instance json from kv store
+        path = TechProfile.TECH_PROFILE_INSTANCE_PATH.format(
+            self.resource_mgr.technology, table_id, uni_port_name)
+
+        try:
+            tech_profile_instance = self._kv_store[path]
+            log.debug("Tech-profile-instance-present-in-kvstore", path=path,
+                      tech_profile_instance=tech_profile_instance)
+
+            # Parse JSON into an object with attributes corresponding to dict keys.
+            tech_profile_instance = json.loads(tech_profile_instance,
+                                               object_hook=lambda d:
+                                               namedtuple('tech_profile_instance',
+                                                          d.keys())(*d.values()))
+            log.debug("Tech-profile-instance-after-json-to-object-conversion", path=path,
+                      tech_profile_instance=tech_profile_instance)
+            return tech_profile_instance
+        except BaseException as e:
+            log.debug("Tech-profile-instance-not-present-in-kvstore",
+                      path=path, tech_profile_instance=None, exception=e)
+            return None
+
+    def delete_tech_profile_instance(self, table_id, uni_port_name):
+        # path to delete tech profile instance json from kv store
+        path = TechProfile.TECH_PROFILE_INSTANCE_PATH.format(
+            self.resource_mgr.technology, table_id, uni_port_name)
+
+        try:
+            del self._kv_store[path]
+            log.debug("Delete-tech-profile-instance-success", path=path)
+            return True
+        except Exception as e:
+            log.debug("Delete-tech-profile-instance-failed", path=path,
+                      exception=e)
+            return False
+
+    def _get_tech_profile_from_kv_store(self, table_id):
+        """
+        Get tech profile from kv store.
+
+        :param table_id: reference to get tech profile
+        :return: tech profile if present in kv store else None
+        """
+        # get tech profile from kv store
+        path = TechProfile.TECH_PROFILE_PATH.format(self.resource_mgr.technology,
+                                                    table_id)
+        try:
+            tech_profile = self._kv_store[path]
+            if tech_profile != '':
+                log.debug("Get-tech-profile-success", tech_profile=tech_profile)
+                return json.loads(tech_profile)
+                # return ast.literal_eval(tech_profile)
+        except KeyError as e:
+            log.info("Get-tech-profile-failed", exception=e)
+            return None
+
+    def _default_tech_profile(self):
+        # Default tech profile
+        upstream_gem_port_attribute_list = list()
+        downstream_gem_port_attribute_list = list()
+        for pbit in TechProfile.pbits:
+            upstream_gem_port_attribute_list.append(
+                GemPortAttribute(pbit_map=pbit,
+                                 discard_config=DiscardConfig()))
+            downstream_gem_port_attribute_list.append(
+                GemPortAttribute(pbit_map=pbit,
+                                 discard_config=DiscardConfig()))
+
+        return TechProfile.DefaultTechProfile(
+            TechProfile.DEFAULT_TECH_PROFILE_NAME,
+            profile_type=self.resource_mgr.technology,
+            version=TechProfile.DEFAULT_VERSION,
+            num_gem_ports=TechProfile.DEFAULT_GEMPORTS_COUNT,
+            instance_control=InstanceControl(),
+            us_scheduler=Scheduler(direction=Direction.UPSTREAM.name),
+            ds_scheduler=Scheduler(direction=Direction.DOWNSTREAM.name),
+            upstream_gem_port_attribute_list=upstream_gem_port_attribute_list,
+            downstream_gem_port_attribute_list=
+            downstream_gem_port_attribute_list)
+
+    @staticmethod
+    def _get_tech_profile(tech_profile):
+        # Tech profile fetched from kv store
+        instance_control = tech_profile[TechProfile.INSTANCE_CONTROL]
+        instance_control = InstanceControl(
+            onu=instance_control[TechProfile.ONU],
+            uni=instance_control[TechProfile.UNI],
+            max_gem_payload_size=instance_control[
+                TechProfile.MAX_GEM_PAYLOAD_SIZE])
+
+        us_scheduler = tech_profile[TechProfile.US_SCHEDULER]
+        us_scheduler = Scheduler(direction=us_scheduler[TechProfile.DIRECTION],
+                                 additional_bw=us_scheduler[
+                                     TechProfile.ADDITIONAL_BW],
+                                 priority=us_scheduler[TechProfile.PRIORITY],
+                                 weight=us_scheduler[TechProfile.WEIGHT],
+                                 q_sched_policy=us_scheduler[
+                                     TechProfile.Q_SCHED_POLICY])
+        ds_scheduler = tech_profile[TechProfile.DS_SCHEDULER]
+        ds_scheduler = Scheduler(direction=ds_scheduler[TechProfile.DIRECTION],
+                                 additional_bw=ds_scheduler[
+                                     TechProfile.ADDITIONAL_BW],
+                                 priority=ds_scheduler[TechProfile.PRIORITY],
+                                 weight=ds_scheduler[TechProfile.WEIGHT],
+                                 q_sched_policy=ds_scheduler[
+                                     TechProfile.Q_SCHED_POLICY])
+
+        upstream_gem_port_attribute_list = list()
+        downstream_gem_port_attribute_list = list()
+        us_gemport_attr_list = tech_profile[
+            TechProfile.UPSTREAM_GEM_PORT_ATTRIBUTE_LIST]
+        for i in range(len(us_gemport_attr_list)):
+            upstream_gem_port_attribute_list.append(
+                GemPortAttribute(pbit_map=us_gemport_attr_list[i][TechProfile.PBIT_MAP],
+                                 discard_config=DiscardConfig(
+                                     max_threshold=
+                                     us_gemport_attr_list[i][TechProfile.DISCARD_CONFIG][
+                                         TechProfile.MAX_THRESHOLD],
+                                     min_threshold=
+                                     us_gemport_attr_list[i][TechProfile.DISCARD_CONFIG][
+                                         TechProfile.MIN_THRESHOLD],
+                                     max_probability=
+                                     us_gemport_attr_list[i][TechProfile.DISCARD_CONFIG][
+                                         TechProfile.MAX_PROBABILITY]),
+                                 discard_policy=us_gemport_attr_list[i][
+                                     TechProfile.DISCARD_POLICY],
+                                 priority_q=us_gemport_attr_list[i][
+                                     TechProfile.PRIORITY_Q],
+                                 weight=us_gemport_attr_list[i][TechProfile.WEIGHT],
+                                 scheduling_policy=us_gemport_attr_list[i][
+                                     TechProfile.SCHEDULING_POLICY],
+                                 max_q_size=us_gemport_attr_list[i][
+                                     TechProfile.MAX_Q_SIZE],
+                                 aes_encryption=us_gemport_attr_list[i][
+                                     TechProfile.AES_ENCRYPTION]))
+
+        ds_gemport_attr_list = tech_profile[
+            TechProfile.DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST]
+        for i in range(len(ds_gemport_attr_list)):
+            downstream_gem_port_attribute_list.append(
+                GemPortAttribute(pbit_map=ds_gemport_attr_list[i][TechProfile.PBIT_MAP],
+                                 discard_config=DiscardConfig(
+                                     max_threshold=
+                                     ds_gemport_attr_list[i][TechProfile.DISCARD_CONFIG][
+                                         TechProfile.MAX_THRESHOLD],
+                                     min_threshold=
+                                     ds_gemport_attr_list[i][TechProfile.DISCARD_CONFIG][
+                                         TechProfile.MIN_THRESHOLD],
+                                     max_probability=
+                                     ds_gemport_attr_list[i][TechProfile.DISCARD_CONFIG][
+                                         TechProfile.MAX_PROBABILITY]),
+                                 discard_policy=ds_gemport_attr_list[i][
+                                     TechProfile.DISCARD_POLICY],
+                                 priority_q=ds_gemport_attr_list[i][
+                                     TechProfile.PRIORITY_Q],
+                                 weight=ds_gemport_attr_list[i][TechProfile.WEIGHT],
+                                 scheduling_policy=ds_gemport_attr_list[i][
+                                     TechProfile.SCHEDULING_POLICY],
+                                 max_q_size=ds_gemport_attr_list[i][
+                                     TechProfile.MAX_Q_SIZE],
+                                 aes_encryption=ds_gemport_attr_list[i][
+                                     TechProfile.AES_ENCRYPTION]))
+
+        return TechProfile.DefaultTechProfile(
+            tech_profile[TechProfile.NAME],
+            profile_type=tech_profile[TechProfile.PROFILE_TYPE],
+            version=tech_profile[TechProfile.VERSION],
+            num_gem_ports=tech_profile[TechProfile.NUM_GEM_PORTS],
+            instance_control=instance_control,
+            us_scheduler=us_scheduler,
+            ds_scheduler=ds_scheduler,
+            upstream_gem_port_attribute_list=upstream_gem_port_attribute_list,
+            downstream_gem_port_attribute_list=
+            downstream_gem_port_attribute_list)
+
+    def _add_tech_profile_instance(self, path, tech_profile_instance):
+        """
+        Add tech profile to kv store.
+
+        :param path: path to add tech profile
+        :param tech_profile_instance: tech profile instance need to be added
+        """
+        try:
+            self._kv_store[path] = str(tech_profile_instance)
+            log.debug("Add-tech-profile-instance-success", path=path,
+                      tech_profile_instance=tech_profile_instance)
+            return True
+        except BaseException as e:
+            log.exception("Add-tech-profile-instance-failed", path=path,
+                          tech_profile_instance=tech_profile_instance,
+                          exception=e)
+        return False
+
+    @staticmethod
+    def get_us_scheduler(tech_profile_instance):
+        # upstream scheduler
+        us_scheduler = openolt_pb2.Scheduler(
+            direction=TechProfile.get_parameter(
+                'direction', tech_profile_instance.us_scheduler.
+                    direction),
+            additional_bw=TechProfile.get_parameter(
+                'additional_bw', tech_profile_instance.
+                    us_scheduler.additional_bw),
+            priority=tech_profile_instance.us_scheduler.priority,
+            weight=tech_profile_instance.us_scheduler.weight,
+            sched_policy=TechProfile.get_parameter(
+                'sched_policy', tech_profile_instance.
+                    us_scheduler.q_sched_policy))
+
+        return us_scheduler
+
+    @staticmethod
+    def get_ds_scheduler(tech_profile_instance):
+        ds_scheduler = openolt_pb2.Scheduler(
+            direction=TechProfile.get_parameter(
+                'direction', tech_profile_instance.ds_scheduler.
+                    direction),
+            additional_bw=TechProfile.get_parameter(
+                'additional_bw', tech_profile_instance.
+                    ds_scheduler.additional_bw),
+            priority=tech_profile_instance.ds_scheduler.priority,
+            weight=tech_profile_instance.ds_scheduler.weight,
+            sched_policy=TechProfile.get_parameter(
+                'sched_policy', tech_profile_instance.ds_scheduler.
+                    q_sched_policy))
+
+        return ds_scheduler
+
+    @staticmethod
+    def get_tconts(tech_profile_instance, us_scheduler=None, ds_scheduler=None):
+        if us_scheduler is None:
+            us_scheduler = TechProfile.get_us_scheduler(tech_profile_instance)
+        if ds_scheduler is None:
+            ds_scheduler = TechProfile.get_ds_scheduler(tech_profile_instance)
+
+        tconts = [openolt_pb2.Tcont(direction=TechProfile.get_parameter(
+            'direction',
+            tech_profile_instance.
+                us_scheduler.direction),
+            alloc_id=tech_profile_instance.
+                us_scheduler.alloc_id,
+            scheduler=us_scheduler),
+            openolt_pb2.Tcont(direction=TechProfile.get_parameter(
+                'direction',
+                tech_profile_instance.
+                    ds_scheduler.direction),
+                alloc_id=tech_profile_instance.
+                    ds_scheduler.alloc_id,
+                scheduler=ds_scheduler)]
+
+        return tconts
+
+    @staticmethod
+    def get_parameter(param_type, param_value):
+        parameter = None
+        try:
+            if param_type == 'direction':
+                for direction in openolt_pb2.Direction.keys():
+                    if param_value == direction:
+                        parameter = direction
+            elif param_type == 'discard_policy':
+                for discard_policy in openolt_pb2.DiscardPolicy.keys():
+                    if param_value == discard_policy:
+                        parameter = discard_policy
+            elif param_type == 'sched_policy':
+                for sched_policy in openolt_pb2.SchedulingPolicy.keys():
+                    if param_value == sched_policy:
+                        parameter = sched_policy
+            elif param_type == 'additional_bw':
+                for bw_component in openolt_pb2.AdditionalBW.keys():
+                    if param_value == bw_component:
+                        parameter = bw_component
+        except BaseException as e:
+            log.exception(exception=e)
+        return parameter
+
+
+class TechProfileInstance(object):
+    def __init__(self, subscriber_identifier, tech_profile, resource_mgr,
+                 intf_id, num_of_tconts=1):
+        if tech_profile is not None:
+            self.subscriber_identifier = subscriber_identifier
+            self.num_of_tconts = num_of_tconts
+            self.num_of_gem_ports = tech_profile.num_gem_ports
+            self.name = tech_profile.name
+            self.profile_type = tech_profile.profile_type
+            self.version = tech_profile.version
+            self.instance_control = tech_profile.instance_control
+
+            # TODO: Fixed num_of_tconts to 1 per TP Instance.
+            # This may change in future
+            assert (num_of_tconts == 1)
+            # Get alloc id and gemport id using resource manager
+            alloc_id = resource_mgr.get_resource_id(intf_id,
+                                                    'ALLOC_ID',
+                                                    num_of_tconts)
+            gem_ports = resource_mgr.get_resource_id(intf_id,
+                                                     'GEMPORT_ID',
+                                                     self.num_of_gem_ports)
+
+            gemport_list = list()
+            if isinstance(gem_ports, int):
+                gemport_list.append(gem_ports)
+            elif isinstance(gem_ports, list):
+                for gem in gem_ports:
+                    gemport_list.append(gem)
+            else:
+                raise Exception("invalid-type")
+
+            self.us_scheduler = TechProfileInstance.IScheduler(
+                alloc_id, tech_profile.us_scheduler)
+            self.ds_scheduler = TechProfileInstance.IScheduler(
+                alloc_id, tech_profile.ds_scheduler)
+
+            self.upstream_gem_port_attribute_list = list()
+            self.downstream_gem_port_attribute_list = list()
+            for i in range(self.num_of_gem_ports):
+                self.upstream_gem_port_attribute_list.append(
+                    TechProfileInstance.IGemPortAttribute(
+                        gemport_list[i],
+                        tech_profile.upstream_gem_port_attribute_list[
+                            i]))
+                self.downstream_gem_port_attribute_list.append(
+                    TechProfileInstance.IGemPortAttribute(
+                        gemport_list[i],
+                        tech_profile.downstream_gem_port_attribute_list[
+                            i]))
+
+    class IScheduler(Scheduler):
+        def __init__(self, alloc_id, scheduler):
+            super(TechProfileInstance.IScheduler, self).__init__(
+                scheduler.direction, scheduler.additional_bw,
+                scheduler.priority,
+                scheduler.weight, scheduler.q_sched_policy)
+            self.alloc_id = alloc_id
+
+    class IGemPortAttribute(GemPortAttribute):
+        def __init__(self, gemport_id, gem_port_attribute):
+            super(TechProfileInstance.IGemPortAttribute, self).__init__(
+                gem_port_attribute.pbit_map, gem_port_attribute.discard_config,
+                gem_port_attribute.aes_encryption,
+                gem_port_attribute.scheduling_policy,
+                gem_port_attribute.priority_q, gem_port_attribute.weight,
+                gem_port_attribute.max_q_size,
+                gem_port_attribute.discard_policy)
+            self.gemport_id = gemport_id
+
+    def to_json(self):
+        return json.dumps(self, default=lambda o: o.__dict__,
+                          indent=4)