VOL-3431: The following enhancements/changes are made in this patch
- Process ONUs in bulk rather than serially, which significantly improves run time
  (a Go sketch of the batching pattern follows this list)
- Use a separate API to get the flow ID. The flow ID is not freed on failure,
  as doing so adds unnecessary complexity that is unwarranted for a test tool.
- Print the total execution time at the end of the test
- Fix the Dockerfile so that the vendor modules are not rebuilt on every docker build
- Introduce new functions to retry scheduler, queue and flow adds on failure;
  these are not currently used
- Add vendor modules to the repo, just like all other ONF VOLTHA golang projects
- Tested all three workflows: ATT, DT and TT
- Bump version to 1.1.0
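
Below is a minimal, self-contained Go sketch of the bulk/batched provisioning pattern
the first bullet refers to. The function and count names (provisionOne, ponPorts,
onusPerPon, batchPerPon) are placeholders for illustration, not code from this repo.

package main

import (
	"fmt"
	"sync"
	"time"
)

// provisionOne is a stand-in for ONU activation plus subscriber provisioning.
func provisionOne(ponPort, onuIdx uint32) {
	time.Sleep(10 * time.Millisecond)
	fmt.Printf("provisioned onu %d on pon %d\n", onuIdx, ponPort)
}

func main() {
	const ponPorts, onusPerPon, batchPerPon uint32 = 4, 8, 4
	var wg sync.WaitGroup
	start := time.Now()
	for batch := uint32(0); batch < onusPerPon/batchPerPon; batch++ {
		for pon := uint32(0); pon < ponPorts; pon++ {
			for k := uint32(0); k < batchPerPon; k++ {
				wg.Add(1)
				go func(pon, onu uint32) {
					defer wg.Done()
					provisionOne(pon, onu)
				}(pon, batch*batchPerPon+k)
			}
		}
		// Wait for the whole batch across all PON ports before starting the next one.
		wg.Wait()
	}
	fmt.Println("total provisioning time:", time.Since(start))
}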

Change-Id: I6102cb206e78ea04b49b7125b101946ca6f36bfb
diff --git a/.gitignore b/.gitignore
index 0d07a83..4a23d30 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,3 @@
 .idea
 openolt-scale-tester
 tests
-!vendor/modules.txt
-vendor/*
diff --git a/VERSION b/VERSION
index 74bcb8d..9084fa2 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.0.5-dev
+1.1.0
diff --git a/compose/openolt-scale-tester.yml b/compose/openolt-scale-tester.yml
index 5bb4543..7b20825 100644
--- a/compose/openolt-scale-tester.yml
+++ b/compose/openolt-scale-tester.yml
@@ -27,7 +27,7 @@
       "--openolt_agent_ip_address=${OPENOLT_AGENT_IP_ADDRESS}",
       "--openolt_agent_port=9191",
       "--openolt_agent_nni_intf_id=0",
-      "--num_of_onu=16",
+      "--num_of_onu=512",
       "--subscribers_per_onu=1",
       "--workflow_name=ATT",
       "--time_interval_between_subs=0",
diff --git a/core/att_workflow.go b/core/att_workflow.go
index cf5cbae..f673ee0 100644
--- a/core/att_workflow.go
+++ b/core/att_workflow.go
@@ -22,7 +22,6 @@
 
 	"github.com/opencord/openolt-scale-tester/config"
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	"github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager"
 	oop "github.com/opencord/voltha-protos/v3/go/openolt"
 	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
 	"golang.org/x/net/context"
@@ -39,11 +38,10 @@
 }
 
 func AddDhcpIPV4Flow(oo oop.OpenoltClient, config *config.OpenOltScaleTesterConfig, rsrMgr *OpenOltResourceMgr) error {
-	var flowID []uint32
+	var flowID uint32
 	var err error
 
-	if flowID, err = rsrMgr.ResourceMgrs[uint32(config.NniIntfID)].GetResourceID(context.Background(), uint32(config.NniIntfID),
-		ponresourcemanager.FLOW_ID, 1); err != nil {
+	if flowID, err = rsrMgr.GetFlowID(context.Background(), uint32(config.NniIntfID)); err != nil {
 		return err
 	}
 
@@ -52,7 +50,7 @@
 	actionCmd := &oop.ActionCmd{TrapToHost: true}
 	actionInfo := &oop.Action{Cmd: actionCmd}
 
-	flow := oop.Flow{AccessIntfId: -1, OnuId: -1, UniId: -1, FlowId: flowID[0],
+	flow := oop.Flow{AccessIntfId: -1, OnuId: -1, UniId: -1, FlowId: flowID,
 		FlowType: "downstream", AllocId: -1, GemportId: -1,
 		Classifier: flowClassifier, Action: actionInfo,
 		Priority: 1000, PortNo: uint32(config.NniIntfID)}
@@ -67,8 +65,6 @@
 
 	if err != nil {
 		log.Errorw("Failed to Add DHCP IPv4 to device", log.Fields{"err": err, "deviceFlow": flow})
-		rsrMgr.ResourceMgrs[uint32(config.NniIntfID)].FreeResourceID(context.Background(), uint32(config.NniIntfID),
-			ponresourcemanager.FLOW_ID, flowID)
 		return err
 	}
 	log.Debugw("DHCP IPV4 added to device successfully ", log.Fields{"flow": flow})
@@ -77,11 +73,10 @@
 }
 
 func AddDhcpIPV6Flow(oo oop.OpenoltClient, config *config.OpenOltScaleTesterConfig, rsrMgr *OpenOltResourceMgr) error {
-	var flowID []uint32
+	var flowID uint32
 	var err error
 
-	if flowID, err = rsrMgr.ResourceMgrs[uint32(config.NniIntfID)].GetResourceID(context.Background(), uint32(config.NniIntfID),
-		ponresourcemanager.FLOW_ID, 1); err != nil {
+	if flowID, err = rsrMgr.GetFlowID(context.Background(), uint32(config.NniIntfID)); err != nil {
 		return err
 	}
 
@@ -90,7 +85,7 @@
 	actionCmd := &oop.ActionCmd{TrapToHost: true}
 	actionInfo := &oop.Action{Cmd: actionCmd}
 
-	flow := oop.Flow{AccessIntfId: -1, OnuId: -1, UniId: -1, FlowId: flowID[0],
+	flow := oop.Flow{AccessIntfId: -1, OnuId: -1, UniId: -1, FlowId: flowID,
 		FlowType: "downstream", AllocId: -1, GemportId: -1,
 		Classifier: flowClassifier, Action: actionInfo,
 		Priority: 1000, PortNo: uint32(config.NniIntfID)}
@@ -105,8 +100,6 @@
 
 	if err != nil {
 		log.Errorw("Failed to Add DHCP IPV6 to device", log.Fields{"err": err, "deviceFlow": flow})
-		rsrMgr.ResourceMgrs[uint32(config.NniIntfID)].FreeResourceID(context.Background(), uint32(config.NniIntfID),
-			ponresourcemanager.FLOW_ID, flowID)
 		return err
 	}
 	log.Debugw("DHCP IPV6 added to device successfully ", log.Fields{"flow": flow})
@@ -156,7 +149,6 @@
 		log.Errorw("Failed to create traffic schedulers", log.Fields{"error": err})
 		return errors.New(ReasonCodeToReasonString(SCHED_CREATION_FAILED))
 	}
-
 	return nil
 }
 
@@ -203,7 +195,7 @@
 
 func (att AttWorkFlow) ProvisionEapFlow(subs *Subscriber) error {
 	var err error
-	var flowID []uint32
+	var flowID uint32
 	var gemPortIDs []uint32
 
 	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
@@ -216,13 +208,10 @@
 		for pos, pbitSet := range strings.TrimPrefix(pBitMap, "0b") {
 			if pbitSet == '1' {
 				pcp := uint32(len(strings.TrimPrefix(pBitMap, "0b"))) - 1 - uint32(pos)
-				if flowID, err = subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].GetResourceID(context.Background(), uint32(subs.PonIntf),
-					ponresourcemanager.FLOW_ID, 1); err != nil {
+				if flowID, err = subs.RsrMgr.GetFlowID(context.Background(), uint32(subs.PonIntf)); err != nil {
 					return errors.New(ReasonCodeToReasonString(FLOW_ID_GENERATION_FAILED))
 				} else {
-					if err := AddFlow(subs, EapolFlow, Upstream, flowID[0], allocID, gemID, pcp); err != nil {
-						subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].FreeResourceID(context.Background(), uint32(subs.PonIntf),
-							ponresourcemanager.FLOW_ID, flowID)
+					if err := AddFlow(subs, EapolFlow, Upstream, flowID, allocID, gemID, pcp); err != nil {
 						return err
 					}
 				}
@@ -234,7 +223,7 @@
 
 func (att AttWorkFlow) ProvisionDhcpIPV4Flow(subs *Subscriber) error {
 	var err error
-	var flowID []uint32
+	var flowID uint32
 	var gemPortIDs []uint32
 
 	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
@@ -247,13 +236,10 @@
 		for pos, pbitSet := range strings.TrimPrefix(pBitMap, "0b") {
 			if pbitSet == '1' {
 				pcp := uint32(len(strings.TrimPrefix(pBitMap, "0b"))) - 1 - uint32(pos)
-				if flowID, err = subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].GetResourceID(context.Background(), uint32(subs.PonIntf),
-					ponresourcemanager.FLOW_ID, 1); err != nil {
+				if flowID, err = subs.RsrMgr.GetFlowID(context.Background(), uint32(subs.PonIntf)); err != nil {
 					return errors.New(ReasonCodeToReasonString(FLOW_ID_GENERATION_FAILED))
 				} else {
-					if err := AddFlow(subs, DhcpFlowIPV4, Upstream, flowID[0], allocID, gemID, pcp); err != nil {
-						subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].FreeResourceID(context.Background(), uint32(subs.PonIntf),
-							ponresourcemanager.FLOW_ID, flowID)
+					if err := AddFlow(subs, DhcpFlowIPV4, Upstream, flowID, allocID, gemID, pcp); err != nil {
 						return err
 					}
 				}
@@ -265,7 +251,7 @@
 
 func (att AttWorkFlow) ProvisionDhcpIPV6Flow(subs *Subscriber) error {
 	var err error
-	var flowID []uint32
+	var flowID uint32
 	var gemPortIDs []uint32
 
 	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
@@ -278,13 +264,10 @@
 		for pos, pbitSet := range strings.TrimPrefix(pBitMap, "0b") {
 			if pbitSet == '1' {
 				pcp := uint32(len(strings.TrimPrefix(pBitMap, "0b"))) - 1 - uint32(pos)
-				if flowID, err = subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].GetResourceID(context.Background(), uint32(subs.PonIntf),
-					ponresourcemanager.FLOW_ID, 1); err != nil {
+				if flowID, err = subs.RsrMgr.GetFlowID(context.Background(), uint32(subs.PonIntf)); err != nil {
 					return errors.New(ReasonCodeToReasonString(FLOW_ID_GENERATION_FAILED))
 				} else {
-					if err := AddFlow(subs, DhcpFlowIPV6, Upstream, flowID[0], allocID, gemID, pcp); err != nil {
-						subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].FreeResourceID(context.Background(), uint32(subs.PonIntf),
-							ponresourcemanager.FLOW_ID, flowID)
+					if err := AddFlow(subs, DhcpFlowIPV6, Upstream, flowID, allocID, gemID, pcp); err != nil {
 						return err
 					}
 				}
@@ -301,7 +284,7 @@
 
 func (att AttWorkFlow) ProvisionHsiaFlow(subs *Subscriber) error {
 	var err error
-	var flowID []uint32
+	var flowID uint32
 	var gemPortIDs []uint32
 
 	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
@@ -314,22 +297,19 @@
 		for pos, pbitSet := range strings.TrimPrefix(pBitMap, "0b") {
 			if pbitSet == '1' {
 				pcp := uint32(len(strings.TrimPrefix(pBitMap, "0b"))) - 1 - uint32(pos)
-				if flowID, err = subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].GetResourceID(context.Background(), uint32(subs.PonIntf),
-					ponresourcemanager.FLOW_ID, 1); err != nil {
+				if flowID, err = subs.RsrMgr.GetFlowID(context.Background(), uint32(subs.PonIntf)); err != nil {
 					return errors.New(ReasonCodeToReasonString(FLOW_ID_GENERATION_FAILED))
 				} else {
 					var errUs, errDs error
-					if errUs = AddFlow(subs, HsiaFlow, Upstream, flowID[0], allocID, gemID, pcp); errUs != nil {
+					if errUs = AddFlow(subs, HsiaFlow, Upstream, flowID, allocID, gemID, pcp); errUs != nil {
 						log.Errorw("failed to install US HSIA flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errDs = AddFlow(subs, HsiaFlow, Downstream, flowID[0], allocID, gemID, pcp); errDs != nil {
+					if errDs = AddFlow(subs, HsiaFlow, Downstream, flowID, allocID, gemID, pcp); errDs != nil {
 						log.Errorw("failed to install US HSIA flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
 					if errUs != nil && errDs != nil {
-						subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].FreeResourceID(context.Background(), uint32(subs.PonIntf),
-							ponresourcemanager.FLOW_ID, flowID)
 					}
 					if errUs != nil || errDs != nil {
 						if errUs != nil {
diff --git a/core/dt_workflow.go b/core/dt_workflow.go
index a06b29b..5c8b345 100644
--- a/core/dt_workflow.go
+++ b/core/dt_workflow.go
@@ -22,7 +22,6 @@
 
 	"github.com/opencord/openolt-scale-tester/config"
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	"github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager"
 	oop "github.com/opencord/voltha-protos/v3/go/openolt"
 	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
 	"golang.org/x/net/context"
@@ -143,7 +142,7 @@
 
 func (dt DtWorkFlow) ProvisionHsiaFlow(subs *Subscriber) error {
 	var err error
-	var flowID []uint32
+	var flowID uint32
 	var gemPortIDs []uint32
 
 	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
@@ -156,16 +155,13 @@
 		for pos, pbitSet := range strings.TrimPrefix(pBitMap, "0b") {
 			if pbitSet == '1' {
 				pcp := uint32(len(strings.TrimPrefix(pBitMap, "0b"))) - 1 - uint32(pos)
-				if flowID, err = subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].GetResourceID(context.Background(), uint32(subs.PonIntf),
-					ponresourcemanager.FLOW_ID, 1); err != nil {
+				if flowID, err = subs.RsrMgr.GetFlowID(context.Background(), uint32(subs.PonIntf)); err != nil {
 					return errors.New(ReasonCodeToReasonString(FLOW_ID_GENERATION_FAILED))
 				} else {
-					if err := AddFlow(subs, HsiaFlow, Upstream, flowID[0], allocID, gemID, pcp); err != nil {
-						subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].FreeResourceID(context.Background(), uint32(subs.PonIntf),
-							ponresourcemanager.FLOW_ID, flowID)
+					if err := AddFlow(subs, HsiaFlow, Upstream, flowID, allocID, gemID, pcp); err != nil {
 						return err
 					}
-					if err := AddFlow(subs, HsiaFlow, Downstream, flowID[0], allocID, gemID, pcp); err != nil {
+					if err := AddFlow(subs, HsiaFlow, Downstream, flowID, allocID, gemID, pcp); err != nil {
 						return err
 					}
 				}
diff --git a/core/olt_manager.go b/core/olt_manager.go
index 78dd97e..f4e3143 100644
--- a/core/olt_manager.go
+++ b/core/olt_manager.go
@@ -44,7 +44,6 @@
 const (
 	ReasonOk          = "OK"
 	TechProfileKVPath = "service/voltha/technology_profiles/%s/%d" // service/voltha/technology_profiles/xgspon/<tech_profile_tableID>
-	DTWorkFlow        = "DT"
 )
 
 type OnuDeviceKey struct {
@@ -62,6 +61,7 @@
 	testConfig    *config.OpenOltScaleTesterConfig
 	rsrMgr        *OpenOltResourceMgr
 	lockRsrAlloc  sync.RWMutex
+	lockOpenOltManager sync.RWMutex
 }
 
 func init() {
@@ -74,6 +74,7 @@
 		ipPort:       ipPort,
 		OnuDeviceMap: make(map[OnuDeviceKey]*OnuDevice),
 		lockRsrAlloc: sync.RWMutex{},
+		lockOpenOltManager: sync.RWMutex{},
 	}
 }
 
@@ -208,43 +209,76 @@
 
 func (om *OpenOltManager) provisionONUs() {
 	var numOfONUsPerPon uint
-	var i, j, onuID uint32
+	var i, j, k, onuID uint32
 	var err error
-	oltChan := make(chan bool)
-	numOfONUsPerPon = om.testConfig.NumOfOnu / uint(om.deviceInfo.PonPorts)
-	if oddONUs := om.testConfig.NumOfOnu % uint(om.deviceInfo.PonPorts); oddONUs > 0 {
-		log.Warnw("Odd number ONUs left out of provisioning", log.Fields{"oddONUs": oddONUs})
+	var onuWg sync.WaitGroup
+
+	defer func() {
+		// Stop the process once the job is done
+		_ = syscall.Kill(syscall.Getpid(), syscall.SIGINT)
+	}()
+
+	// If the number of ONUs to provision is not a power of 2, stop execution
+	// This is needed to ensure an even distribution of ONUs across all PON ports
+	if !isPowerOfTwo(om.testConfig.NumOfOnu) {
+		log.Errorw("num-of-onus-to-provision-is-not-a-power-of-2", log.Fields{"numOfOnus": om.testConfig.NumOfOnu})
+		return
 	}
+
+	// Number of ONUs to provision should not be less than the number of PON ports.
+	// We need at least one ONU per PON
+	if om.testConfig.NumOfOnu < uint(om.deviceInfo.PonPorts) {
+		log.Errorw("num-of-onu-is-less-than-num-of-pon-port", log.Fields{"numOfOnus":om.testConfig.NumOfOnu, "numOfPon": om.deviceInfo.PonPorts})
+		return
+	}
+
+	numOfONUsPerPon = om.testConfig.NumOfOnu / uint(om.deviceInfo.PonPorts)
 	totalOnusToProvision := numOfONUsPerPon * uint(om.deviceInfo.PonPorts)
 	log.Infow("***** all-onu-provision-started ******",
 		log.Fields{"totalNumOnus": totalOnusToProvision,
 			"numOfOnusPerPon": numOfONUsPerPon,
 			"numOfPons":       om.deviceInfo.PonPorts})
-	for i = 0; i < om.deviceInfo.PonPorts; i++ {
-		for j = 0; j < uint32(numOfONUsPerPon); j++ {
-			// TODO: More work with ONU provisioning
-			om.lockRsrAlloc.Lock()
-			sn := GenerateNextONUSerialNumber()
-			om.lockRsrAlloc.Unlock()
-			log.Debugw("provisioning onu", log.Fields{"onuID": j, "ponPort": i, "serialNum": sn})
-			if onuID, err = om.rsrMgr.GetONUID(i); err != nil {
-				log.Errorw("error getting onu id", log.Fields{"err": err})
-				continue
-			}
-			log.Infow("onu-provision-started-from-olt-manager", log.Fields{"onuId": onuID, "ponIntf": i})
-			go om.activateONU(i, onuID, sn, om.stringifySerialNumber(sn), oltChan)
-			// Wait for complete ONU provision to succeed, including provisioning the subscriber
-			<-oltChan
-			log.Infow("onu-provision-completed-from-olt-manager", log.Fields{"onuId": onuID, "ponIntf": i})
 
-			// Sleep for configured time before provisioning next ONU
-			time.Sleep(time.Duration(om.testConfig.TimeIntervalBetweenSubs))
-		}
+	// These are the number of ONUs that will be provisioned per PON port per batch.
+	// Such number of ONUs will be chosen across all PON ports per batch
+	var onusPerIterationPerPonPort uint32 = 4
+
+	// If the total number of ONUs per PON is less than the default number of ONUs to provision per PON port per batch,
+	// keep halving the number of ONUs to provision per PON port per batch until we reach an acceptable number.
+	// Note: the least possible value for onusPerIterationPerPonPort is 1
+	for uint32(numOfONUsPerPon) < onusPerIterationPerPonPort {
+		onusPerIterationPerPonPort /= 2
 	}
-	log.Info("******** all-onu-provisioning-completed *******")
 
-	// TODO: We need to dump the results at the end. But below json marshall does not work
-	// We will need custom Marshal function.
+	startTime := time.Now()
+	// Start provisioning the ONUs
+	for i = 0; i < uint32(numOfONUsPerPon)/onusPerIterationPerPonPort; i++ {
+		for j = 0; j < om.deviceInfo.PonPorts; j++ {
+			for k = 0; k < onusPerIterationPerPonPort; k++ {
+				om.lockRsrAlloc.Lock()
+				sn := GenerateNextONUSerialNumber()
+				om.lockRsrAlloc.Unlock()
+				log.Debugw("provisioning onu", log.Fields{"onuID": j, "ponPort": i, "serialNum": sn})
+				if onuID, err = om.rsrMgr.GetONUID(j); err != nil {
+					log.Errorw("error getting onu id", log.Fields{"err": err})
+					continue
+				}
+				log.Infow("onu-provision-started-from-olt-manager", log.Fields{"onuId": onuID, "ponIntf": i})
+
+				onuWg.Add(1)
+				go om.activateONU(j, onuID, sn, om.stringifySerialNumber(sn), &onuWg)
+			}
+		}
+		// Wait for the group of ONUs to complete processing before going to next batch of ONUs
+		onuWg.Wait()
+	}
+	endTime := time.Now()
+	log.Info("******** all-onu-provisioning-completed *******")
+	totalTime := endTime.Sub(startTime)
+	out := time.Time{}.Add(totalTime)
+	log.Infof("****** Total Time to provision all the ONUs is => %s", out.Format("15:04:05"))
+
+	// TODO: We need to dump the results at the end. But the json marshal below does not work. We will need a custom Marshal function.
 	/*
 		e, err := json.Marshal(om)
 		if err != nil {
@@ -253,12 +287,9 @@
 		}
 		fmt.Println(string(e))
 	*/
-
-	// Stop the process once the job is done
-	_ = syscall.Kill(syscall.Getpid(), syscall.SIGINT)
 }
 
-func (om *OpenOltManager) activateONU(intfID uint32, onuID uint32, serialNum *oop.SerialNumber, serialNumber string, oltCh chan bool) {
+func (om *OpenOltManager) activateONU(intfID uint32, onuID uint32, serialNum *oop.SerialNumber, serialNumber string, onuWg *sync.WaitGroup) {
 	log.Debugw("activate-onu", log.Fields{"intfID": intfID, "onuID": onuID, "serialNum": serialNum, "serialNumber": serialNumber})
 	// TODO: need resource manager
 	var pir uint32 = 1000000
@@ -269,6 +300,7 @@
 		openOltClient: om.openOltClient,
 		testConfig:    om.testConfig,
 		rsrMgr:        om.rsrMgr,
+		onuWg:         onuWg,
 	}
 	var err error
 	onuDeviceKey := OnuDeviceKey{onuID: onuID, ponInfID: intfID}
@@ -281,7 +313,6 @@
 		st, _ := status.FromError(err)
 		if st.Code() == codes.AlreadyExists {
 			log.Debug("ONU activation is in progress", log.Fields{"SerialNumber": serialNumber})
-			oltCh <- false
 		} else {
 			nanos = now.UnixNano()
 			milliEnd := nanos / 1000000
@@ -289,7 +320,6 @@
 			onuDevice.OnuProvisionDurationInMs = milliEnd - milliStart
 			log.Errorw("activate-onu-failed", log.Fields{"Onu": Onu, "err ": err})
 			onuDevice.Reason = err.Error()
-			oltCh <- false
 		}
 	} else {
 		nanos = now.UnixNano()
@@ -300,12 +330,16 @@
 		log.Infow("activated-onu", log.Fields{"SerialNumber": serialNumber})
 	}
 
+	om.lockOpenOltManager.Lock()
 	om.OnuDeviceMap[onuDeviceKey] = &onuDevice
+	om.lockOpenOltManager.Unlock()
 
 	// If ONU activation was success provision the ONU
 	if err == nil {
-		// start provisioning the ONU
-		go om.OnuDeviceMap[onuDeviceKey].Start(oltCh)
+		om.lockOpenOltManager.RLock()
+		go om.OnuDeviceMap[onuDeviceKey].Start()
+		om.lockOpenOltManager.RUnlock()
+
 	}
 }
 
@@ -441,3 +475,7 @@
 		log.Fields{"numofTech": tpCount, "numPonPorts": om.deviceInfo.GetPonPorts()})
 	return nil
 }
+
+func isPowerOfTwo(numOfOnus uint) bool {
+	return (numOfOnus & (numOfOnus - 1)) == 0
+}
\ No newline at end of file
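
An illustrative, self-contained sketch of the ONU-sizing math used by provisionONUs above,
using the compose-file default of 512 ONUs; the 16 PON ports is an assumption for
illustration, and the power-of-two helper here adds a zero guard that is not in the patch.

package main

import "fmt"

// isPowerOfTwo mirrors the helper above, with an extra guard for zero.
func isPowerOfTwo(n uint) bool { return n != 0 && n&(n-1) == 0 }

func main() {
	numOfOnu, ponPorts := uint(512), uint(16)
	fmt.Println("power of two:", isPowerOfTwo(numOfOnu)) // true, so provisioning proceeds

	onusPerPon := numOfOnu / ponPorts // 32 ONUs per PON port
	batchPerPon := uint(4)            // default batch size per PON port
	for onusPerPon < batchPerPon {    // halve the batch size for very small runs
		batchPerPon /= 2
	}
	fmt.Println("onus per pon:", onusPerPon)                       // 32
	fmt.Println("batches:", onusPerPon/batchPerPon)                // 8 outer iterations
	fmt.Println("onus in flight per batch:", ponPorts*batchPerPon) // 64 concurrent ONUs
}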
diff --git a/core/onu_manager.go b/core/onu_manager.go
index 6fa9201..4fd665b 100644
--- a/core/onu_manager.go
+++ b/core/onu_manager.go
@@ -18,6 +18,7 @@
 
 import (
 	"strconv"
+	"sync"
 	"time"
 
 	"github.com/opencord/openolt-scale-tester/config"
@@ -45,13 +46,13 @@
 	openOltClient            oop.OpenoltClient
 	testConfig               *config.OpenOltScaleTesterConfig
 	rsrMgr                   *OpenOltResourceMgr
+	onuWg                    *sync.WaitGroup
 }
 
-func (onu *OnuDevice) Start(oltCh chan bool) {
+func (onu *OnuDevice) Start() {
 	onu.SubscriberMap = make(map[SubscriberKey]*Subscriber)
-	onuCh := make(chan bool)
 	var subs uint
-
+	var subWg sync.WaitGroup
 	log.Infow("onu-provision-started-from-onu-manager", log.Fields{"onuID": onu.OnuID, "ponIntf": onu.PonIntf})
 
 	for subs = 0; subs < onu.testConfig.SubscribersPerOnu; subs++ {
@@ -67,24 +68,22 @@
 			OpenOltClient:  onu.openOltClient,
 			TestConfig:     onu.testConfig,
 			RsrMgr:         onu.rsrMgr,
+			subWg:          &subWg,
 		}
 		subsKey := SubscriberKey{subsName}
 		onu.SubscriberMap[subsKey] = &subs
 
+		subWg.Add(1)
 		log.Infow("subscriber-provision-started-from-onu-manager", log.Fields{"subsName": subsName})
 		// Start provisioning the subscriber
-		go subs.Start(onuCh, onu.testConfig.IsGroupTest)
+		go subs.Start(onu.testConfig.IsGroupTest)
 
-		// Wait for subscriber provision to complete
-		<-onuCh
-
-		log.Infow("subscriber-provision-completed-from-onu-manager", log.Fields{"subsName": subsName})
-
-		//Sleep for configured interval before provisioning another subscriber
-		time.Sleep(time.Duration(onu.testConfig.TimeIntervalBetweenSubs))
 	}
-	// Indicate that the ONU provisioning is complete
-	oltCh <- true
+
+	// Wait for all the subscribers on the ONU to complete provisioning
+	subWg.Wait()
+	// Signal that ONU provisioning is complete
+	onu.onuWg.Done()
 
 	log.Infow("onu-provision-completed-from-onu-manager", log.Fields{"onuID": onu.OnuID, "ponIntf": onu.PonIntf})
 }
diff --git a/core/resource_manager.go b/core/resource_manager.go
index 3be01bc..d8674c9 100644
--- a/core/resource_manager.go
+++ b/core/resource_manager.go
@@ -18,10 +18,9 @@
 package core
 
 import (
-	"errors"
-	"fmt"
 	"strconv"
 	"strings"
+	"sync"
 
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	ponrmgr "github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager"
@@ -36,6 +35,16 @@
 // OpenOltResourceMgr holds resource related information as provided below for each field
 type OpenOltResourceMgr struct {
 	deviceInfo *openolt.DeviceInfo
+
+	// This protects concurrent onu_id allocate/delete calls on a per PON port basis
+	OnuIDMgmtLock []sync.RWMutex
+	// This protects concurrent flow_id allocate/delete calls. We do not need this on a
+	// per PON port basis as flow IDs are unique across the OLT.
+	FlowIDMgmtLock sync.RWMutex
+
+	// This protects concurrent GemID and AllocID allocate/delete calls on a per PON port basis
+	GemIDAllocIDLock []sync.RWMutex
+
 	// array of pon resource managers per interface technology
 	ResourceMgrs map[uint32]*ponrmgr.PONResourceManager
 }
@@ -48,6 +57,11 @@
 	log.Debugf("Init new resource manager")
 
 	ResourceMgr.deviceInfo = devInfo
+	NumPONPorts := devInfo.GetPonPorts()
+
+	ResourceMgr.OnuIDMgmtLock = make([]sync.RWMutex, NumPONPorts)
+	ResourceMgr.GemIDAllocIDLock = make([]sync.RWMutex, NumPONPorts)
+	ResourceMgr.FlowIDMgmtLock = sync.RWMutex{}
 
 	Ranges := make(map[string]*openolt.DeviceInfo_DeviceResourceRanges)
 	RsrcMgrsByTech := make(map[string]*ponrmgr.PONResourceManager)
@@ -64,7 +78,6 @@
 		var ranges openolt.DeviceInfo_DeviceResourceRanges
 		ranges.Technology = devInfo.GetTechnology()
 
-		NumPONPorts := devInfo.GetPonPorts()
 		var index uint32
 		for index = 0; index < NumPONPorts; index++ {
 			ranges.IntfIds = append(ranges.IntfIds, index)
@@ -277,27 +290,6 @@
 
 // Delete clears used resources for the particular olt device being deleted
 func (RsrcMgr *OpenOltResourceMgr) Delete() error {
-	/* TODO
-	   def __del__(self):
-	           self.log.info("clearing-device-resource-pool")
-	           for key, resource_mgr in self.resource_mgrs.iteritems():
-	               resource_mgr.clear_device_resource_pool()
-
-	       def assert_pon_id_limit(self, pon_intf_id):
-	           assert pon_intf_id in self.resource_mgrs
-
-	       def assert_onu_id_limit(self, pon_intf_id, onu_id):
-	           self.assert_pon_id_limit(pon_intf_id)
-	           self.resource_mgrs[pon_intf_id].assert_resource_limits(onu_id, PONResourceManager.ONU_ID)
-
-	       @property
-	       def max_uni_id_per_onu(self):
-	           return 0 #OpenOltPlatform.MAX_UNIS_PER_ONU-1, zero-based indexing Uncomment or override to make default multi-uni
-
-	       def assert_uni_id_limit(self, pon_intf_id, onu_id, uni_id):
-	           self.assert_onu_id_limit(pon_intf_id, onu_id)
-	           self.resource_mgrs[pon_intf_id].assert_resource_limits(uni_id, PONResourceManager.UNI_ID)
-	*/
 	for _, rsrcMgr := range RsrcMgr.ResourceMgrs {
 		if err := rsrcMgr.ClearDeviceResourcePool(context.Background()); err != nil {
 			log.Debug("Failed to clear device resource pool")
@@ -310,143 +302,29 @@
 
 // GetONUID returns the available OnuID for the given pon-port
 func (RsrcMgr *OpenOltResourceMgr) GetONUID(ponIntfID uint32) (uint32, error) {
+	RsrcMgr.OnuIDMgmtLock[ponIntfID].Lock()
+	defer RsrcMgr.OnuIDMgmtLock[ponIntfID].Unlock()
 	// Check if Pon Interface ID is present in Resource-manager-map
-	if _, ok := RsrcMgr.ResourceMgrs[ponIntfID]; !ok {
-		err := errors.New("invalid-pon-interface-" + strconv.Itoa(int(ponIntfID)))
-		return 0, err
-	}
-	// Get ONU id for a provided pon interface ID.
-	ONUID, err := RsrcMgr.ResourceMgrs[ponIntfID].GetResourceID(context.Background(), ponIntfID,
+	ONUIDs, err := RsrcMgr.ResourceMgrs[ponIntfID].GetResourceID(context.Background(), ponIntfID,
 		ponrmgr.ONU_ID, 1)
 	if err != nil {
 		log.Errorf("Failed to get resource for interface %d for type %s",
 			ponIntfID, ponrmgr.ONU_ID)
-		return 0, err
+		return uint32(0), err
 	}
-	if ONUID != nil {
-		RsrcMgr.ResourceMgrs[ponIntfID].InitResourceMap(context.Background(), fmt.Sprintf("%d,%d", ponIntfID, ONUID[0]))
-		return ONUID[0], err
-	}
-
-	return 0, err // return OnuID 0 on error
+	return ONUIDs[0], err
 }
 
-// GetAllocID return the first Alloc ID for a given pon interface id and onu id and then update the resource map on
-// the KV store with the list of alloc_ids allocated for the pon_intf_onu_id tuple
-// Currently of all the alloc_ids available, it returns the first alloc_id in the list for tha given ONU
-func (RsrcMgr *OpenOltResourceMgr) GetAllocID(intfID uint32, onuID uint32, uniID uint32) uint32 {
-
-	var err error
-	IntfOnuIDUniID := fmt.Sprintf("%d,%d,%d", intfID, onuID, uniID)
-	AllocID := RsrcMgr.ResourceMgrs[intfID].GetCurrentAllocIDForOnu(context.Background(), IntfOnuIDUniID)
-	if AllocID != nil {
-		// Since we support only one alloc_id for the ONU at the moment,
-		// return the first alloc_id in the list, if available, for that
-		// ONU.
-		log.Debugw("Retrieved alloc ID from pon resource mgr", log.Fields{"AllocID": AllocID})
-		return AllocID[0]
-	}
-	AllocID, err = RsrcMgr.ResourceMgrs[intfID].GetResourceID(context.Background(), intfID,
-		ponrmgr.ALLOC_ID, 1)
-
-	if AllocID == nil || err != nil {
-		log.Error("Failed to allocate alloc id")
-		return 0
-	}
-	// update the resource map on KV store with the list of alloc_id
-	// allocated for the pon_intf_onu_id tuple
-	err = RsrcMgr.ResourceMgrs[intfID].UpdateAllocIdsForOnu(context.Background(), IntfOnuIDUniID, AllocID)
+// GetFlowID returns a flow ID for the given pon interface id
+func (RsrcMgr *OpenOltResourceMgr) GetFlowID(ctx context.Context, ponIntfID uint32) (uint32, error) {
+	RsrcMgr.FlowIDMgmtLock.Lock()
+	defer RsrcMgr.FlowIDMgmtLock.Unlock()
+	FlowIDs, err := RsrcMgr.ResourceMgrs[ponIntfID].GetResourceID(ctx, ponIntfID,
+		ponrmgr.FLOW_ID, 1)
 	if err != nil {
-		log.Error("Failed to update Alloc ID")
-		return 0
+		log.Errorf("Failed to get resource for interface %d for type %s",
+			ponIntfID, ponrmgr.FLOW_ID)
+		return uint32(0), err
 	}
-	log.Debugw("Allocated new Tcont from pon resource mgr", log.Fields{"AllocID": AllocID})
-	return AllocID[0]
-}
-
-// GetGEMPortID gets gem port id for a particular pon port, onu id and uni id and then update the resource map on
-// the KV store with the list of gemport_id allocated for the pon_intf_onu_id tuple
-func (RsrcMgr *OpenOltResourceMgr) GetGEMPortID(ponPort uint32, onuID uint32,
-	uniID uint32, NumOfPorts uint32) ([]uint32, error) {
-
-	/* Get gem port id for a particular pon port, onu id
-	   and uni id.
-	*/
-
-	var err error
-	IntfOnuIDUniID := fmt.Sprintf("%d,%d,%d", ponPort, onuID, uniID)
-
-	GEMPortList := RsrcMgr.ResourceMgrs[ponPort].GetCurrentGEMPortIDsForOnu(context.Background(), IntfOnuIDUniID)
-	if GEMPortList != nil {
-		return GEMPortList, nil
-	}
-
-	GEMPortList, err = RsrcMgr.ResourceMgrs[ponPort].GetResourceID(context.Background(), ponPort,
-		ponrmgr.GEMPORT_ID, NumOfPorts)
-	if err != nil && GEMPortList == nil {
-		log.Errorf("Failed to get gem port id for %s", IntfOnuIDUniID)
-		return nil, err
-	}
-
-	// update the resource map on KV store with the list of gemport_id
-	// allocated for the pon_intf_onu_id tuple
-	err = RsrcMgr.ResourceMgrs[ponPort].UpdateGEMPortIDsForOnu(context.Background(), IntfOnuIDUniID,
-		GEMPortList)
-	if err != nil {
-		log.Errorf("Failed to update GEM ports to kv store for %s", IntfOnuIDUniID)
-		return nil, err
-	}
-
-	return GEMPortList, err
-}
-
-// FreeFlowID returns the free flow id for a given interface, onu id and uni id
-func (RsrcMgr *OpenOltResourceMgr) FreeFlowID(IntfID uint32, onuID int32,
-	uniID int32, FlowID uint32) {
-	var IntfONUID string
-	var err error
-	FlowIds := make([]uint32, 0)
-
-	FlowIds = append(FlowIds, FlowID)
-	IntfONUID = fmt.Sprintf("%d,%d,%d", IntfID, onuID, uniID)
-	err = RsrcMgr.ResourceMgrs[IntfID].UpdateFlowIDForOnu(context.Background(), IntfONUID, FlowID, false)
-	if err != nil {
-		log.Errorw("Failed to Update flow id  for", log.Fields{"intf": IntfONUID})
-	}
-	RsrcMgr.ResourceMgrs[IntfID].RemoveFlowIDInfo(context.Background(), IntfONUID, FlowID)
-	RsrcMgr.ResourceMgrs[IntfID].FreeResourceID(context.Background(), IntfID, ponrmgr.FLOW_ID, FlowIds)
-}
-
-// FreeFlowIDs releases the flow Ids
-func (RsrcMgr *OpenOltResourceMgr) FreeFlowIDs(IntfID uint32, onuID uint32,
-	uniID uint32, FlowID []uint32) {
-
-	RsrcMgr.ResourceMgrs[IntfID].FreeResourceID(context.Background(), IntfID, ponrmgr.FLOW_ID, FlowID)
-
-	var IntfOnuIDUniID string
-	var err error
-	for _, flow := range FlowID {
-		IntfOnuIDUniID = fmt.Sprintf("%d,%d,%d", IntfID, onuID, uniID)
-		err = RsrcMgr.ResourceMgrs[IntfID].UpdateFlowIDForOnu(context.Background(), IntfOnuIDUniID, flow, false)
-		if err != nil {
-			log.Errorw("Failed to Update flow id for", log.Fields{"intf": IntfOnuIDUniID})
-		}
-		RsrcMgr.ResourceMgrs[IntfID].RemoveFlowIDInfo(context.Background(), IntfOnuIDUniID, flow)
-	}
-}
-
-// FreeAllocID frees AllocID on the PON resource pool and also frees the allocID association
-// for the given OLT device.
-func (RsrcMgr *OpenOltResourceMgr) FreeAllocID(IntfID uint32, allocID uint32) {
-	allocIDs := make([]uint32, 0)
-	allocIDs = append(allocIDs, allocID)
-	RsrcMgr.ResourceMgrs[IntfID].FreeResourceID(context.Background(), IntfID, ponrmgr.ALLOC_ID, allocIDs)
-}
-
-// FreeGemPortID frees GemPortID on the PON resource pool and also frees the gemPortID association
-// for the given OLT device.
-func (RsrcMgr *OpenOltResourceMgr) FreeGemPortID(IntfID uint32, gemPortID uint32) {
-	gemPortIDs := make([]uint32, 0)
-	gemPortIDs = append(gemPortIDs, gemPortID)
-	RsrcMgr.ResourceMgrs[IntfID].FreeResourceID(context.Background(), IntfID, ponrmgr.GEMPORT_ID, gemPortIDs)
+	return FlowIDs[0], err
 }
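
A minimal sketch of the locking scheme introduced above: one mutex per PON port guards
onu_id allocation, while a single OLT-wide mutex guards flow_id allocation. The allocator
below is a placeholder for illustration, not the voltha-lib-go PONResourceManager.

package main

import (
	"fmt"
	"sync"
)

// rsrMgr is a stand-in allocator with per-PON and OLT-wide locks.
type rsrMgr struct {
	onuIDLock  []sync.Mutex // one lock per PON port for onu_id allocation
	flowIDLock sync.Mutex   // one OLT-wide lock: flow IDs are unique across the OLT
	nextOnuID  []uint32
	nextFlowID uint32
}

func newRsrMgr(ponPorts int) *rsrMgr {
	return &rsrMgr{
		onuIDLock: make([]sync.Mutex, ponPorts),
		nextOnuID: make([]uint32, ponPorts),
	}
}

// GetONUID serializes concurrent callers on the same PON port only.
func (r *rsrMgr) GetONUID(pon int) uint32 {
	r.onuIDLock[pon].Lock()
	defer r.onuIDLock[pon].Unlock()
	r.nextOnuID[pon]++
	return r.nextOnuID[pon]
}

// GetFlowID serializes all callers, whichever PON port they provision.
func (r *rsrMgr) GetFlowID() uint32 {
	r.flowIDLock.Lock()
	defer r.flowIDLock.Unlock()
	r.nextFlowID++
	return r.nextFlowID
}

func main() {
	r := newRsrMgr(2)
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(r.GetONUID(0), r.GetFlowID())
		}()
	}
	wg.Wait()
}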
diff --git a/core/subscriber_manager.go b/core/subscriber_manager.go
index d84fad9..3cf65ad 100644
--- a/core/subscriber_manager.go
+++ b/core/subscriber_manager.go
@@ -18,6 +18,7 @@
 
 import (
 	"fmt"
+	"sync"
 
 	"github.com/opencord/openolt-scale-tester/config"
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
@@ -86,9 +87,11 @@
 	OpenOltClient oop.OpenoltClient
 	TestConfig    *config.OpenOltScaleTesterConfig
 	RsrMgr        *OpenOltResourceMgr
+	subWg         *sync.WaitGroup
 }
 
-func (subs *Subscriber) Start(onuCh chan bool, isGroup bool) {
+func (subs *Subscriber) Start(isGroup bool) {
+
 	var err error
 
 	log.Infow("workflow-deploy-started-for-subscriber", log.Fields{"subsName": subs.SubscriberName})
@@ -97,22 +100,20 @@
 
 	for _, tpID := range subs.TestConfig.TpIDList {
 		uniPortName := fmt.Sprintf(UniPortName, subs.PonIntf, subs.OnuID, subs.UniID)
-		if subs.TpInstance[tpID], err =
-			subs.RsrMgr.ResourceMgrs[subs.PonIntf].TechProfileMgr.CreateTechProfInstance(context.Background(),
-				uint32(tpID), uniPortName, subs.PonIntf); err != nil {
+		subs.RsrMgr.GemIDAllocIDLock[subs.PonIntf].Lock()
+		subs.TpInstance[tpID], err = subs.RsrMgr.ResourceMgrs[subs.PonIntf].TechProfileMgr.CreateTechProfInstance(context.Background(),
+			uint32(tpID), uniPortName, subs.PonIntf)
+		subs.RsrMgr.GemIDAllocIDLock[subs.PonIntf].Unlock()
+		if err != nil {
 			log.Errorw("error-creating-tp-instance-for-subs",
 				log.Fields{"subsName": subs.SubscriberName, "onuID": subs.OnuID, "tpID": tpID})
 
 			subs.Reason = ReasonCodeToReasonString(TP_INSTANCE_CREATION_FAILED)
-			onuCh <- true
-
 			return
 		}
 	}
 
-	DeployWorkflow(subs, isGroup)
+	go DeployWorkflow(subs, isGroup)
 
-	log.Infow("workflow-deploy-completed-for-subscriber", log.Fields{"subsName": subs.SubscriberName})
-
-	onuCh <- true
+	log.Infow("workflow-deploy-started-for-subscriber", log.Fields{"subsName": subs.SubscriberName})
 }
diff --git a/core/tt_workflow.go b/core/tt_workflow.go
index 862da20..7b14007 100644
--- a/core/tt_workflow.go
+++ b/core/tt_workflow.go
@@ -23,7 +23,6 @@
 
 	"github.com/opencord/openolt-scale-tester/config"
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	"github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager"
 	oop "github.com/opencord/voltha-protos/v3/go/openolt"
 	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
 	"golang.org/x/net/context"
@@ -42,11 +41,11 @@
 }
 
 func AddTtDhcpIPV4Flow(oo oop.OpenoltClient, config *config.OpenOltScaleTesterConfig, rsrMgr *OpenOltResourceMgr) error {
-	var flowID []uint32
+	var flowID uint32
 	var err error
 
-	if flowID, err = rsrMgr.ResourceMgrs[uint32(config.NniIntfID)].GetResourceID(context.Background(), uint32(config.NniIntfID),
-		ponresourcemanager.FLOW_ID, 1); err != nil {
+	// Allocate the flowID from the PON0 pool for a trap-from-nni flow
+	if flowID, err = rsrMgr.GetFlowID(context.Background(), uint32(0)); err != nil {
 		return err
 	}
 
@@ -55,7 +54,7 @@
 	actionCmd := &oop.ActionCmd{TrapToHost: true}
 	actionInfo := &oop.Action{Cmd: actionCmd}
 
-	flow := oop.Flow{AccessIntfId: -1, OnuId: -1, UniId: -1, FlowId: flowID[0],
+	flow := oop.Flow{AccessIntfId: -1, OnuId: -1, UniId: -1, FlowId: flowID,
 		FlowType: "downstream", AllocId: -1, GemportId: -1,
 		Classifier: flowClassifier, Action: actionInfo,
 		Priority: 1000, PortNo: uint32(config.NniIntfID)}
@@ -70,8 +69,6 @@
 
 	if err != nil {
 		log.Errorw("Failed to Add DHCP IPv4 to device", log.Fields{"err": err, "deviceFlow": flow})
-		rsrMgr.ResourceMgrs[uint32(config.NniIntfID)].FreeResourceID(context.Background(), uint32(config.NniIntfID),
-			ponresourcemanager.FLOW_ID, flowID)
 		return err
 	}
 	log.Debugw("DHCP IPV4 added to device successfully ", log.Fields{"flow": flow})
@@ -334,7 +331,7 @@
 
 func (tt TtWorkFlow) ProvisionHsiaFlow(subs *Subscriber) error {
 	var err error
-	var flowID []uint32
+	var flowID uint32
 	var gemPortIDs []uint32
 
 	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
@@ -347,23 +344,19 @@
 		for pos, pbitSet := range strings.TrimPrefix(pBitMap, "0b") {
 			if pbitSet == '1' {
 				pcp := uint32(len(strings.TrimPrefix(pBitMap, "0b"))) - 1 - uint32(pos)
-				if flowID, err = subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].GetResourceID(context.Background(), uint32(subs.PonIntf),
-					ponresourcemanager.FLOW_ID, 1); err != nil {
+				if flowID, err = subs.RsrMgr.GetFlowID(context.Background(), uint32(subs.PonIntf)); err != nil {
 					return errors.New(ReasonCodeToReasonString(FLOW_ID_GENERATION_FAILED))
 				} else {
 					var errUs, errDs error
-					if errUs = AddTtFlow(subs, HsiaFlow, Upstream, flowID[0], allocID, gemID, pcp); errUs != nil {
+					if errUs = AddTtFlow(subs, HsiaFlow, Upstream, flowID, allocID, gemID, pcp); errUs != nil {
 						log.Errorw("failed to install US HSIA flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errDs = AddTtFlow(subs, HsiaFlow, Downstream, flowID[0], allocID, gemID, pcp); errDs != nil {
+					if errDs = AddTtFlow(subs, HsiaFlow, Downstream, flowID, allocID, gemID, pcp); errDs != nil {
 						log.Errorw("failed to install DS HSIA flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errUs != nil && errDs != nil {
-						subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].FreeResourceID(context.Background(), uint32(subs.PonIntf),
-							ponresourcemanager.FLOW_ID, flowID)
-					}
+
 					if errUs != nil || errDs != nil {
 						if errUs != nil {
 							return errUs
@@ -379,7 +372,7 @@
 
 func (tt TtWorkFlow) ProvisionVoipFlow(subs *Subscriber) error {
 	var err error
-	var flowID []uint32
+	var flowID uint32
 	var gemPortIDs []uint32
 
 	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
@@ -392,27 +385,23 @@
 		for pos, pbitSet := range strings.TrimPrefix(pBitMap, "0b") {
 			if pbitSet == '1' {
 				pcp := uint32(len(strings.TrimPrefix(pBitMap, "0b"))) - 1 - uint32(pos)
-				if flowID, err = subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].GetResourceID(context.Background(), uint32(subs.PonIntf),
-					ponresourcemanager.FLOW_ID, 1); err != nil {
+				if flowID, err = subs.RsrMgr.GetFlowID(context.Background(), uint32(subs.PonIntf)); err != nil {
 					return errors.New(ReasonCodeToReasonString(FLOW_ID_GENERATION_FAILED))
 				} else {
 					var errUs, errDs, errDhcp error
-					if errUs = AddTtFlow(subs, VoipFlow, Upstream, flowID[0], allocID, gemID, pcp); errUs != nil {
+					if errUs = AddTtFlow(subs, VoipFlow, Upstream, flowID, allocID, gemID, pcp); errUs != nil {
 						log.Errorw("failed to install US VOIP flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errDs = AddTtFlow(subs, VoipFlow, Downstream, flowID[0], allocID, gemID, pcp); errDs != nil {
+					if errDs = AddTtFlow(subs, VoipFlow, Downstream, flowID, allocID, gemID, pcp); errDs != nil {
 						log.Errorw("failed to install DS VOIP flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errDhcp = AddFlow(subs, DhcpFlowIPV4, Upstream, flowID[0], allocID, gemID, pcp); errDhcp != nil {
+					if errDhcp = AddFlow(subs, DhcpFlowIPV4, Upstream, flowID, allocID, gemID, pcp); errDhcp != nil {
 						log.Errorw("failed to install US VOIP-DHCP flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errUs != nil && errDs != nil && errDhcp != nil {
-						subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].FreeResourceID(context.Background(), uint32(subs.PonIntf),
-							ponresourcemanager.FLOW_ID, flowID)
-					}
+
 					if errUs != nil || errDs != nil || errDhcp != nil {
 						if errUs != nil {
 							return errUs
@@ -431,7 +420,7 @@
 
 func (tt TtWorkFlow) ProvisionVodFlow(subs *Subscriber) error {
 	var err error
-	var flowID []uint32
+	var flowID uint32
 	var gemPortIDs []uint32
 
 	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
@@ -444,31 +433,27 @@
 		for pos, pbitSet := range strings.TrimPrefix(pBitMap, "0b") {
 			if pbitSet == '1' {
 				pcp := uint32(len(strings.TrimPrefix(pBitMap, "0b"))) - 1 - uint32(pos)
-				if flowID, err = subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].GetResourceID(context.Background(), uint32(subs.PonIntf),
-					ponresourcemanager.FLOW_ID, 1); err != nil {
+				if flowID, err = subs.RsrMgr.GetFlowID(context.Background(), uint32(subs.PonIntf)); err != nil {
 					return errors.New(ReasonCodeToReasonString(FLOW_ID_GENERATION_FAILED))
 				} else {
 					var errUs, errDs, errDhcp, errIgmp error
-					if errUs = AddTtFlow(subs, VodFlow, Upstream, flowID[0], allocID, gemID, pcp); errUs != nil {
+					if errUs = AddTtFlow(subs, VodFlow, Upstream, flowID, allocID, gemID, pcp); errUs != nil {
 						log.Errorw("failed to install US VOIP flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errDs = AddTtFlow(subs, VodFlow, Downstream, flowID[0], allocID, gemID, pcp); errDs != nil {
+					if errDs = AddTtFlow(subs, VodFlow, Downstream, flowID, allocID, gemID, pcp); errDs != nil {
 						log.Errorw("failed to install DS VOIP flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errDhcp = AddFlow(subs, DhcpFlowIPV4, Upstream, flowID[0], allocID, gemID, pcp); errDhcp != nil {
+					if errDhcp = AddFlow(subs, DhcpFlowIPV4, Upstream, flowID, allocID, gemID, pcp); errDhcp != nil {
 						log.Errorw("failed to install US VOIP-DHCP flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errIgmp = AddTtFlow(subs, IgmpFlow, Upstream, flowID[0], allocID, gemID, pcp); errIgmp != nil {
+					if errIgmp = AddTtFlow(subs, IgmpFlow, Upstream, flowID, allocID, gemID, pcp); errIgmp != nil {
 						log.Errorw("failed to install US VOIP-IGMP flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errUs != nil && errDs != nil && errDhcp != nil && errIgmp != nil {
-						subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].FreeResourceID(context.Background(), uint32(subs.PonIntf),
-							ponresourcemanager.FLOW_ID, flowID)
-					}
+
 					if errUs != nil || errDs != nil || errDhcp != nil || errIgmp != nil {
 						if errUs != nil {
 							return errUs
@@ -490,7 +475,7 @@
 
 func (tt TtWorkFlow) ProvisionMgmtFlow(subs *Subscriber) error {
 	var err error
-	var flowID []uint32
+	var flowID uint32
 	var gemPortIDs []uint32
 
 	var allocID = subs.TpInstance[subs.TestConfig.TpIDList[0]].UsScheduler.AllocID
@@ -503,27 +488,23 @@
 		for pos, pbitSet := range strings.TrimPrefix(pBitMap, "0b") {
 			if pbitSet == '1' {
 				pcp := uint32(len(strings.TrimPrefix(pBitMap, "0b"))) - 1 - uint32(pos)
-				if flowID, err = subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].GetResourceID(context.Background(), uint32(subs.PonIntf),
-					ponresourcemanager.FLOW_ID, 1); err != nil {
+				if flowID, err = subs.RsrMgr.GetFlowID(context.Background(), uint32(subs.PonIntf)); err != nil {
 					return errors.New(ReasonCodeToReasonString(FLOW_ID_GENERATION_FAILED))
 				} else {
 					var errUs, errDs, errDhcp error
-					if errUs = AddTtFlow(subs, MgmtFlow, Upstream, flowID[0], allocID, gemID, pcp); errUs != nil {
+					if errUs = AddTtFlow(subs, MgmtFlow, Upstream, flowID, allocID, gemID, pcp); errUs != nil {
 						log.Errorw("failed to install US MGMT flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errDs = AddTtFlow(subs, MgmtFlow, Downstream, flowID[0], allocID, gemID, pcp); errDs != nil {
+					if errDs = AddTtFlow(subs, MgmtFlow, Downstream, flowID, allocID, gemID, pcp); errDs != nil {
 						log.Errorw("failed to install DS MGMT flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errDhcp = AddFlow(subs, DhcpFlowIPV4, Upstream, flowID[0], allocID, gemID, pcp); errDhcp != nil {
+					if errDhcp = AddFlow(subs, DhcpFlowIPV4, Upstream, flowID, allocID, gemID, pcp); errDhcp != nil {
 						log.Errorw("failed to install US MGMT-DHCP flow",
 							log.Fields{"onuID": subs.OnuID, "uniID": subs.UniID, "intf": subs.PonIntf})
 					}
-					if errUs != nil && errDs != nil && errDhcp != nil {
-						subs.RsrMgr.ResourceMgrs[uint32(subs.PonIntf)].FreeResourceID(context.Background(), uint32(subs.PonIntf),
-							ponresourcemanager.FLOW_ID, flowID)
-					}
+
 					if errUs != nil || errDs != nil || errDhcp != nil {
 						if errUs != nil {
 							return errUs
diff --git a/core/workflow_manager.go b/core/workflow_manager.go
index 1112a7e..e9bb24e 100644
--- a/core/workflow_manager.go
+++ b/core/workflow_manager.go
@@ -44,6 +44,9 @@
 }
 
 func DeployWorkflow(subs *Subscriber, isGroup bool) {
+
+	defer subs.subWg.Done()
+
 	var wf = getWorkFlow(subs)
 
 	if isGroup {
@@ -104,6 +107,7 @@
 		}
 	}
 
+	log.Infow("subscriber-provision-completed-from-onu-manager", log.Fields{"subsName": subs.SubscriberName})
 	subs.Reason = ReasonCodeToReasonString(SUBSCRIBER_PROVISION_SUCCESS)
 }
 
diff --git a/core/workflow_utils.go b/core/workflow_utils.go
index 4ab7356..a223c02 100644
--- a/core/workflow_utils.go
+++ b/core/workflow_utils.go
@@ -19,6 +19,7 @@
 import (
 	"errors"
 	"math/rand"
+	"time"
 
 	"github.com/opencord/openolt-scale-tester/config"
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
@@ -537,3 +538,76 @@
 
 	return nil
 }
+
+func CreateTrafficSchedWithRetry(OpenOltClient oop.OpenoltClient, sched *oop.TrafficSchedulers) error {
+	maxRetry := 20
+	if _, err := OpenOltClient.CreateTrafficSchedulers(context.Background(), sched); err == nil {
+		log.Info("succeeded in first attempt")
+		return nil
+	} else {
+		log.Info("going for a retry")
+	}
+	for i := 0; i < maxRetry; i++ {
+		if _, err := OpenOltClient.CreateTrafficSchedulers(context.Background(), sched); err != nil {
+			log.Error("retying after delay")
+			time.Sleep(50 * time.Millisecond)
+			continue
+		} else {
+			log.Infow("succeeded in retry iteration=%d!!", log.Fields{"i": i})
+			return nil
+		}
+	}
+
+	return errors.New("failed-to-create-traffic-sched-after-all-retries")
+}
+
+func CreateTrafficQueuesWithRetry(OpenOltClient oop.OpenoltClient, queue *oop.TrafficQueues) error {
+	maxRetry := 20
+	if _, err := OpenOltClient.CreateTrafficQueues(context.Background(), queue); err == nil {
+		log.Info("succeeded in first attempt")
+		return nil
+	}
+	for i := 0; i < maxRetry; i++ {
+		if _, err := OpenOltClient.CreateTrafficQueues(context.Background(), queue); err != nil {
+			time.Sleep(50 * time.Millisecond)
+			continue
+		} else {
+			log.Infow("succeeded in retry iteration=%d!!", log.Fields{"i": i})
+			return nil
+		}
+	}
+
+	return errors.New("failed-to-create-traffic-queue-after-all-retries")
+}
+
+func AddFlowWithRetry(OpenOltClient oop.OpenoltClient, flow *oop.Flow) error {
+
+	var err error
+	maxRetry := 20
+
+	_, err = OpenOltClient.FlowAdd(context.Background(), flow)
+
+	st, _ := status.FromError(err)
+	if st.Code() == codes.AlreadyExists {
+		log.Debugw("Flow already exists", log.Fields{"err": err, "deviceFlow": flow})
+		return nil
+	}
+	if st.Code() == codes.ResourceExhausted {
+		for i := 0; i < maxRetry; i++ {
+			_, err = OpenOltClient.FlowAdd(context.Background(), flow)
+			st, _ := status.FromError(err)
+			if st.Code() == codes.ResourceExhausted {
+				log.Error("flow-install-failed--retrying")
+				continue
+			} else if st.Code() == codes.OK {
+				log.Infow("flow-install-succeeded-on-retry", log.Fields{"i": i, "flow": flow})
+				return nil
+			}
+		}
+
+	}
+
+	if err == nil {
+		// The first attempt succeeded; nothing to retry
+		return nil
+	}
+	log.Debugw("Flow install failed on all retries", log.Fields{"flow": flow})
+
+	return err
+}
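
A self-contained sketch of the retry pattern these (currently unused) helpers implement:
try once, then retry a bounded number of times with a short delay. The anonymous
operation below is a stand-in for the OpenoltClient calls, used only for illustration.

package main

import (
	"errors"
	"fmt"
	"time"
)

// withRetry tries op once, then retries up to maxRetry times with a fixed delay.
func withRetry(maxRetry int, delay time.Duration, op func() error) error {
	if err := op(); err == nil {
		return nil
	}
	for i := 0; i < maxRetry; i++ {
		if err := op(); err == nil {
			fmt.Println("succeeded on retry", i)
			return nil
		}
		time.Sleep(delay)
	}
	return errors.New("failed after all retries")
}

func main() {
	attempts := 0
	err := withRetry(20, 50*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("resource exhausted") // simulate a transient failure
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}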
diff --git a/docker/Dockerfile b/docker/Dockerfile
index a1b04ad..2d57b37 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -18,16 +18,16 @@
 FROM golang:1.12-alpine3.9 AS build-env
 
 # Install required packages
-RUN apk add --no-cache wget git make build-base protobuf protobuf-dev
+RUN apk add --no-cache build-base=0.5-r1
 
 # Prepare directory structure
-WORKDIR /go/src
+WORKDIR /go/src/github.com/opencord/openolt-scale-tester
 COPY . .
 
 # Build openolt
 SHELL ["/bin/ash", "-o", "pipefail", "-c"]
 ENV GO111MODULE=on
-RUN  go mod vendor && go build -mod=vendor -o /go/bin/openolt-scale-tester
+RUN  go build -mod=vendor -o /go/bin/openolt-scale-tester
 
 # -------------
 # Image creation stage
@@ -39,4 +39,4 @@
 
 # Copy required files
 COPY --from=build-env /go/bin/openolt-scale-tester /app/
-COPY --from=build-env /go/src/tech-profiles /app/
+COPY --from=build-env /go/src/github.com/opencord/openolt-scale-tester/tech-profiles /app/
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
new file mode 100644
index 0000000..faa86ed
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package db
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	// Default Minimal Interval for posting alive state of backend kvstore on Liveness Channel
+	DefaultLivenessChannelInterval = time.Second * 30
+)
+
+// Backend structure holds details for accessing the kv store
+type Backend struct {
+	sync.RWMutex
+	Client                  kvstore.Client
+	StoreType               string
+	Host                    string
+	Port                    int
+	Timeout                 int
+	PathPrefix              string
+	alive                   bool          // Is this backend connection alive?
+	liveness                chan bool     // channel to post alive state
+	LivenessChannelInterval time.Duration // regularly push alive state beyond this interval
+	lastLivenessTime        time.Time     // Instant of last alive state push
+}
+
+// NewBackend creates a new instance of a Backend structure
+func NewBackend(storeType string, host string, port int, timeout int, pathPrefix string) *Backend {
+	var err error
+
+	b := &Backend{
+		StoreType:               storeType,
+		Host:                    host,
+		Port:                    port,
+		Timeout:                 timeout,
+		LivenessChannelInterval: DefaultLivenessChannelInterval,
+		PathPrefix:              pathPrefix,
+		alive:                   false, // connection considered down at start
+	}
+
+	address := host + ":" + strconv.Itoa(port)
+	if b.Client, err = b.newClient(address, timeout); err != nil {
+		logger.Errorw("failed-to-create-kv-client",
+			log.Fields{
+				"type": storeType, "host": host, "port": port,
+				"timeout": timeout, "prefix": pathPrefix,
+				"error": err.Error(),
+			})
+	}
+
+	return b
+}
+
+func (b *Backend) newClient(address string, timeout int) (kvstore.Client, error) {
+	switch b.StoreType {
+	case "consul":
+		return kvstore.NewConsulClient(address, timeout)
+	case "etcd":
+		return kvstore.NewEtcdClient(address, timeout)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
+func (b *Backend) makePath(key string) string {
+	path := fmt.Sprintf("%s/%s", b.PathPrefix, key)
+	return path
+}
+
+func (b *Backend) updateLiveness(alive bool) {
+	// Periodically push stream of liveness data to the channel,
+	// so that in a live state, the core does not timeout and
+	// send a forced liveness message. Push alive state if the
+	// last push to channel was beyond livenessChannelInterval
+	if b.liveness != nil {
+
+		if b.alive != alive {
+			logger.Debug("update-liveness-channel-reason-change")
+			b.liveness <- alive
+			b.lastLivenessTime = time.Now()
+		} else if time.Since(b.lastLivenessTime) > b.LivenessChannelInterval {
+			logger.Debug("update-liveness-channel-reason-interval")
+			b.liveness <- alive
+			b.lastLivenessTime = time.Now()
+		}
+	}
+
+	// Emit log message only for alive state change
+	if b.alive != alive {
+		logger.Debugw("change-kvstore-alive-status", log.Fields{"alive": alive})
+		b.alive = alive
+	}
+}
+
+// Perform a dummy Key Lookup on kvstore to test Connection Liveness and
+// post on Liveness channel
+func (b *Backend) PerformLivenessCheck(ctx context.Context) bool {
+	alive := b.Client.IsConnectionUp(ctx)
+	logger.Debugw("kvstore-liveness-check-result", log.Fields{"alive": alive})
+
+	b.updateLiveness(alive)
+	return alive
+}
+
+// Enable the liveness monitor channel. This channel will report
+// a "true" or "false" on every kvstore operation which indicates whether
+// or not the connection is still Live. This channel is then picked up
+// by the service (i.e. rw_core / ro_core) to update readiness status
+// and/or take other actions.
+func (b *Backend) EnableLivenessChannel() chan bool {
+	logger.Debug("enable-kvstore-liveness-channel")
+
+	if b.liveness == nil {
+		logger.Debug("create-kvstore-liveness-channel")
+
+		// Channel size of 10 to avoid any possibility of blocking in Load conditions
+		b.liveness = make(chan bool, 10)
+
+		// Post initial alive state
+		b.liveness <- b.alive
+		b.lastLivenessTime = time.Now()
+	}
+
+	return b.liveness
+}
+
+// Extract Alive status of Kvstore based on type of error
+func (b *Backend) isErrorIndicatingAliveKvstore(err error) bool {
+	// Alive unless observed an error indicating so
+	alive := true
+
+	if err != nil {
+
+		// timeout indicates kvstore not reachable/alive
+		if err == context.DeadlineExceeded {
+			alive = false
+		}
+
+		// Need to analyze client-specific errors based on backend type
+		if b.StoreType == "etcd" {
+
+			// For etcd backend, consider not-alive only for errors indicating
+			// timedout request or unavailable/corrupted cluster. For all remaining
+			// error codes listed in https://godoc.org/google.golang.org/grpc/codes#Code,
+			// we would not infer a not-alive backend because such a error may also
+			// occur due to bad client requests or sequence of operations
+			switch status.Code(err) {
+			case codes.DeadlineExceeded:
+				fallthrough
+			case codes.Unavailable:
+				fallthrough
+			case codes.DataLoss:
+				alive = false
+			}
+
+			//} else {
+			// TODO: Implement for consul backend; would it be needed ever?
+		}
+	}
+
+	return alive
+}
+
+// List retrieves one or more items that match the specified key
+func (b *Backend) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath})
+
+	pair, err := b.Client.List(ctx, formattedPath)
+
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+	return pair, err
+}
+
+// Get retrieves an item that matches the specified key
+func (b *Backend) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath})
+
+	pair, err := b.Client.Get(ctx, formattedPath)
+
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+	return pair, err
+}
+
+// Put stores an item value under the specifed key
+func (b *Backend) Put(ctx context.Context, key string, value interface{}) error {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("putting-key", log.Fields{"key": key, "value": value, "path": formattedPath})
+
+	err := b.Client.Put(ctx, formattedPath, value)
+
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+	return err
+}
+
+// Delete removes an item under the specified key
+func (b *Backend) Delete(ctx context.Context, key string) error {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath})
+
+	err := b.Client.Delete(ctx, formattedPath)
+
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+
+	return err
+}
+
+// CreateWatch starts watching events for the specified key
+func (b *Backend) CreateWatch(ctx context.Context, key string, withPrefix bool) chan *kvstore.Event {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("creating-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	return b.Client.Watch(ctx, formattedPath, withPrefix)
+}
+
+// DeleteWatch stops watching events for the specified key
+func (b *Backend) DeleteWatch(key string, ch chan *kvstore.Event) {
+	b.Lock()
+	defer b.Unlock()
+
+	formattedPath := b.makePath(key)
+	logger.Debugw("deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	b.Client.CloseWatch(formattedPath, ch)
+}
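+
+// Illustrative usage sketch (not part of the upstream source): given a
+// constructed Backend, callers interact with the KV store through these
+// prefixed helpers. The key and value below are placeholders.
+//
+//	if err := backend.Put(ctx, "devices/device-1", []byte("some-value")); err == nil {
+//		pair, _ := backend.Get(ctx, "devices/device-1")
+//		_ = pair
+//	}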
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
new file mode 100644
index 0000000..a5a79ae
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package db
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+const (
+	logLevel = log.ErrorLevel
+)
+
+var logger log.Logger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "db"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
new file mode 100644
index 0000000..b9cb1ee
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import "context"
+
+const (
+	// Default timeout in seconds when making a kvstore request
+	defaultKVGetTimeout = 5
+	// Maximum channel buffer between publisher/subscriber goroutines
+	maxClientChannelBufferSize = 10
+)
+
+// These constants represent the event types returned by the KV client
+const (
+	PUT = iota
+	DELETE
+	CONNECTIONDOWN
+	UNKNOWN
+)
+
+// KVPair is a common wrapper for key-value pairs returned from the KV store
+type KVPair struct {
+	Key     string
+	Value   interface{}
+	Version int64
+	Session string
+	Lease   int64
+}
+
+// NewKVPair creates a new KVPair object
+func NewKVPair(key string, value interface{}, session string, lease int64, version int64) *KVPair {
+	kv := new(KVPair)
+	kv.Key = key
+	kv.Value = value
+	kv.Session = session
+	kv.Lease = lease
+	kv.Version = version
+	return kv
+}
+
+// Event is generated by the KV client when a key change is detected
+type Event struct {
+	EventType int
+	Key       interface{}
+	Value     interface{}
+	Version   int64
+}
+
+// NewEvent creates a new Event object
+func NewEvent(eventType int, key interface{}, value interface{}, version int64) *Event {
+	evnt := new(Event)
+	evnt.EventType = eventType
+	evnt.Key = key
+	evnt.Value = value
+	evnt.Version = version
+
+	return evnt
+}
+
+// Client represents the set of APIs a KV Client must implement
+type Client interface {
+	List(ctx context.Context, key string) (map[string]*KVPair, error)
+	Get(ctx context.Context, key string) (*KVPair, error)
+	Put(ctx context.Context, key string, value interface{}) error
+	Delete(ctx context.Context, key string) error
+	Reserve(ctx context.Context, key string, value interface{}, ttl int64) (interface{}, error)
+	ReleaseReservation(ctx context.Context, key string) error
+	ReleaseAllReservations(ctx context.Context) error
+	RenewReservation(ctx context.Context, key string) error
+	Watch(ctx context.Context, key string, withPrefix bool) chan *Event
+	AcquireLock(ctx context.Context, lockName string, timeout int) error
+	ReleaseLock(lockName string) error
+	IsConnectionUp(ctx context.Context) bool // timeout in second
+	CloseWatch(key string, ch chan *Event)
+	Close()
+}
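+
+// Illustrative sketch (not part of the upstream file): both the etcd and the
+// consul implementations in this package satisfy Client, so callers can pick
+// a backend at run time and program only against the interface. The addresses
+// and the storeType variable below are placeholders.
+//
+//	var kvClient Client
+//	var err error
+//	switch storeType {
+//	case "etcd":
+//		kvClient, err = NewEtcdClient("etcd:2379", 5)
+//	case "consul":
+//		kvClient, err = NewConsulClient("consul:8500", 5)
+//	}
+//	if err == nil && kvClient != nil {
+//		pair, _ := kvClient.Get(context.Background(), "service/voltha/some-key")
+//		_ = pair
+//	}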
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
new file mode 100644
index 0000000..2d2a6a6
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+)
+
+const (
+	logLevel = log.ErrorLevel
+)
+
+var logger log.Logger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "kvstore"})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
new file mode 100644
index 0000000..bdf2d10
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
@@ -0,0 +1,512 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	log "github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"sync"
+	"time"
+	//log "ciena.com/coordinator/common"
+	consulapi "github.com/hashicorp/consul/api"
+)
+
+type channelContextMap struct {
+	ctx     context.Context
+	channel chan *Event
+	cancel  context.CancelFunc
+}
+
+// ConsulClient represents the consul KV store client
+type ConsulClient struct {
+	session                *consulapi.Session
+	sessionID              string
+	consul                 *consulapi.Client
+	doneCh                 *chan int
+	keyReservations        map[string]interface{}
+	watchedChannelsContext map[string][]*channelContextMap
+	writeLock              sync.Mutex
+}
+
+// NewConsulClient returns a new client for the Consul KV store
+func NewConsulClient(addr string, timeout int) (*ConsulClient, error) {
+
+	duration := GetDuration(timeout)
+
+	config := consulapi.DefaultConfig()
+	config.Address = addr
+	config.WaitTime = duration
+	consul, err := consulapi.NewClient(config)
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+
+	doneCh := make(chan int, 1)
+	wChannelsContext := make(map[string][]*channelContextMap)
+	reservations := make(map[string]interface{})
+	return &ConsulClient{consul: consul, doneCh: &doneCh, watchedChannelsContext: wChannelsContext, keyReservations: reservations}, nil
+}
+
+// IsConnectionUp returns whether the connection to the Consul KV store is up
+func (c *ConsulClient) IsConnectionUp(ctx context.Context) bool {
+	logger.Error("Unimplemented function")
+	return false
+}
+
+// List returns an array of key-value pairs with key as a prefix.  Timeout defines how long the function will
+// wait for a response
+func (c *ConsulClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
+
+	deadline, _ := ctx.Deadline()
+	kv := c.consul.KV()
+	var queryOptions consulapi.QueryOptions
+	queryOptions.WaitTime = GetDuration(deadline.Second())
+	// For now we ignore meta data
+	kvps, _, err := kv.List(key, &queryOptions)
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+	m := make(map[string]*KVPair)
+	for _, kvp := range kvps {
+		m[string(kvp.Key)] = NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0, -1)
+	}
+	return m, nil
+}
+
+// Get returns a key-value pair for a given key. Timeout defines how long the function will
+// wait for a response
+func (c *ConsulClient) Get(ctx context.Context, key string) (*KVPair, error) {
+
+	deadline, _ := ctx.Deadline()
+	kv := c.consul.KV()
+	var queryOptions consulapi.QueryOptions
+	queryOptions.WaitTime = GetDuration(deadline.Second())
+	// For now we ignore meta data
+	kvp, _, err := kv.Get(key, &queryOptions)
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+	if kvp != nil {
+		return NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0, -1), nil
+	}
+
+	return nil, nil
+}
+
+// Put writes a key-value pair to the KV store.  Value can only be a string or []byte since the consul API
+// accepts only a []byte as a value for a put operation. Timeout defines how long the function will
+// wait for a response
+func (c *ConsulClient) Put(ctx context.Context, key string, value interface{}) error {
+
+	// Validate that we can create a byte array from the value as consul API expects a byte array
+	var val []byte
+	var er error
+	if val, er = ToByte(value); er != nil {
+		logger.Error(er)
+		return er
+	}
+
+	// Create a key value pair
+	kvp := consulapi.KVPair{Key: key, Value: val}
+	kv := c.consul.KV()
+	var writeOptions consulapi.WriteOptions
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	_, err := kv.Put(&kvp, &writeOptions)
+	if err != nil {
+		logger.Error(err)
+		return err
+	}
+	return nil
+}
+
+// Delete removes a key from the KV store. Timeout defines how long the function will
+// wait for a response
+func (c *ConsulClient) Delete(ctx context.Context, key string) error {
+	kv := c.consul.KV()
+	var writeOptions consulapi.WriteOptions
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	_, err := kv.Delete(key, &writeOptions)
+	if err != nil {
+		logger.Error(err)
+		return err
+	}
+	return nil
+}
+
+func (c *ConsulClient) deleteSession() {
+	if c.sessionID != "" {
+		logger.Debug("cleaning-up-session")
+		session := c.consul.Session()
+		_, err := session.Destroy(c.sessionID, nil)
+		if err != nil {
+			logger.Errorw("error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
+		}
+	}
+	c.sessionID = ""
+	c.session = nil
+}
+
+func (c *ConsulClient) createSession(ttl int64, retries int) (*consulapi.Session, string, error) {
+	session := c.consul.Session()
+	entry := &consulapi.SessionEntry{
+		Behavior: consulapi.SessionBehaviorDelete,
+		TTL:      "10s", // strconv.FormatInt(ttl, 10) + "s", // disable ttl
+	}
+
+	for {
+		id, meta, err := session.Create(entry, nil)
+		if err != nil {
+			logger.Errorw("create-session-error", log.Fields{"error": err})
+			if retries == 0 {
+				return nil, "", err
+			}
+		} else if meta.RequestTime == 0 {
+			logger.Errorw("create-session-bad-meta-data", log.Fields{"meta-data": meta})
+			if retries == 0 {
+				return nil, "", errors.New("bad-meta-data")
+			}
+		} else if id == "" {
+			logger.Error("create-session-nil-id")
+			if retries == 0 {
+				return nil, "", errors.New("ID-nil")
+			}
+		} else {
+			return session, id, nil
+		}
+		// If retry param is -1 we will retry indefinitely
+		if retries > 0 {
+			retries--
+		}
+		logger.Debug("retrying-session-create-after-a-second-delay")
+		time.Sleep(time.Duration(1) * time.Second)
+	}
+}
+
+// isEqual is a helper function that verifies whether the contents of two interface values are the same.  The focus
+// is on []byte and string types
+func isEqual(val1 interface{}, val2 interface{}) bool {
+	b1, err := ToByte(val1)
+	b2, er := ToByte(val2)
+	if err == nil && er == nil {
+		return bytes.Equal(b1, b2)
+	}
+	return val1 == val2
+}
+
+// Reserve is invoked to acquire a key and set it to a given value. Value can only be a string or []byte since
+// the consul API accepts only a []byte.  Timeout defines how long the function will wait for a response.  TTL
+// defines how long that reservation is valid.  When TTL expires the key is unreserved by the KV store itself.
+// If the key is acquired then the value returned will be the value passed in.  If the key is already acquired
+// then the value assigned to that key will be returned.
+func (c *ConsulClient) Reserve(ctx context.Context, key string, value interface{}, ttl int64) (interface{}, error) {
+
+	// Validate that we can create a byte array from the value as consul API expects a byte array
+	var val []byte
+	var er error
+	if val, er = ToByte(value); er != nil {
+		logger.Error(er)
+		return nil, er
+	}
+
+	// Clean up any existing session and create a new one.  A key is reserved against a session
+	if c.sessionID != "" {
+		c.deleteSession()
+	}
+
+	// Clear session if reservation is not successful
+	reservationSuccessful := false
+	defer func() {
+		if !reservationSuccessful {
+			logger.Debug("deleting-session")
+			c.deleteSession()
+		}
+	}()
+
+	session, sessionID, err := c.createSession(ttl, -1)
+	if err != nil {
+		logger.Errorw("no-session-created", log.Fields{"error": err})
+		return "", errors.New("no-session-created")
+	}
+	logger.Debugw("session-created", log.Fields{"session-id": sessionID})
+	c.sessionID = sessionID
+	c.session = session
+
+	// Try to grab the Key using the session
+	kv := c.consul.KV()
+	kvp := consulapi.KVPair{Key: key, Value: val, Session: c.sessionID}
+	result, _, err := kv.Acquire(&kvp, nil)
+	if err != nil {
+		logger.Errorw("error-acquiring-keys", log.Fields{"error": err})
+		return nil, err
+	}
+
+	logger.Debugw("key-acquired", log.Fields{"key": key, "status": result})
+
+	// Irrespective of whether we were successful in acquiring the key, let's read it back and see if it's us.
+	m, err := c.Get(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+	if m != nil {
+		logger.Debugw("response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
+		if m.Key == key && isEqual(m.Value, value) {
+			// My reservation is successful - register it.  For now, support is only for 1 reservation per key
+			// per session.
+			reservationSuccessful = true
+			c.writeLock.Lock()
+			c.keyReservations[key] = m.Value
+			c.writeLock.Unlock()
+			return m.Value, nil
+		}
+		// My reservation has failed.  Return the owner of that key
+		return m.Value, nil
+	}
+	return nil, nil
+}
+
+// ReleaseAllReservations releases all key reservations previously made (using Reserve API)
+func (c *ConsulClient) ReleaseAllReservations(ctx context.Context) error {
+	kv := c.consul.KV()
+	var kvp consulapi.KVPair
+	var result bool
+	var err error
+
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+
+	for key, value := range c.keyReservations {
+		kvp = consulapi.KVPair{Key: key, Value: value.([]byte), Session: c.sessionID}
+		result, _, err = kv.Release(&kvp, nil)
+		if err != nil {
+			logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
+			return err
+		}
+		if !result {
+			logger.Errorw("cannot-release-reservation", log.Fields{"key": key})
+		}
+		delete(c.keyReservations, key)
+	}
+	return nil
+}
+
+// ReleaseReservation releases reservation for a specific key.
+func (c *ConsulClient) ReleaseReservation(ctx context.Context, key string) error {
+	var ok bool
+	var reservedValue interface{}
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	if reservedValue, ok = c.keyReservations[key]; !ok {
+		return errors.New("key-not-reserved:" + key)
+	}
+	// Release the reservation
+	kv := c.consul.KV()
+	kvp := consulapi.KVPair{Key: key, Value: reservedValue.([]byte), Session: c.sessionID}
+
+	result, _, er := kv.Release(&kvp, nil)
+	if er != nil {
+		return er
+	}
+	// Remove that key entry on success
+	if result {
+		delete(c.keyReservations, key)
+		return nil
+	}
+	return errors.New("key-cannot-be-unreserved")
+}
+
+// RenewReservation renews a reservation.  A reservation will go stale after the specified TTL (Time To Live)
+// period specified when reserving the key
+func (c *ConsulClient) RenewReservation(ctx context.Context, key string) error {
+	// In the case of Consul, renewing the reservation of a reserved key only requires renewing the client session.
+
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+
+	// Verify the key was reserved
+	if _, ok := c.keyReservations[key]; !ok {
+		return errors.New("key-not-reserved")
+	}
+
+	if c.session == nil {
+		return errors.New("no-session-exist")
+	}
+
+	var writeOptions consulapi.WriteOptions
+	if _, _, err := c.session.Renew(c.sessionID, &writeOptions); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Watch provides the watch capability on a given key.  It returns a channel onto which the caller needs to
+// listen to receive Events.
+func (c *ConsulClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
+
+	// Create a new channel
+	ch := make(chan *Event, maxClientChannelBufferSize)
+
+	// Create a context to track this request
+	watchContext, cFunc := context.WithCancel(context.Background())
+
+	// Save the channel and context reference for later
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	ccm := channelContextMap{channel: ch, ctx: watchContext, cancel: cFunc}
+	c.watchedChannelsContext[key] = append(c.watchedChannelsContext[key], &ccm)
+
+	// Launch a go routine to listen for updates
+	go c.listenForKeyChange(watchContext, key, ch)
+
+	return ch
+}
+
+// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
+// may be multiple listeners on the same key.  The previously created channel serves as a key
+func (c *ConsulClient) CloseWatch(key string, ch chan *Event) {
+	// First close the context
+	var ok bool
+	var watchedChannelsContexts []*channelContextMap
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	if watchedChannelsContexts, ok = c.watchedChannelsContext[key]; !ok {
+		logger.Errorw("key-has-no-watched-context-or-channel", log.Fields{"key": key})
+		return
+	}
+	// Look for the channels
+	var pos = -1
+	for i, chCtxMap := range watchedChannelsContexts {
+		if chCtxMap.channel == ch {
+			logger.Debug("channel-found")
+			chCtxMap.cancel()
+			//close the channel
+			close(ch)
+			pos = i
+			break
+		}
+	}
+	// Remove that entry if present
+	if pos >= 0 {
+		c.watchedChannelsContext[key] = append(c.watchedChannelsContext[key][:pos], c.watchedChannelsContext[key][pos+1:]...)
+	}
+	logger.Debugw("watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
+}
+
+func (c *ConsulClient) isKVEqual(kv1 *consulapi.KVPair, kv2 *consulapi.KVPair) bool {
+	if (kv1 == nil) && (kv2 == nil) {
+		return true
+	} else if (kv1 == nil) || (kv2 == nil) {
+		return false
+	}
+	// Both the KV should be non-null here
+	if kv1.Key != kv2.Key ||
+		!bytes.Equal(kv1.Value, kv2.Value) ||
+		kv1.Session != kv2.Session ||
+		kv1.LockIndex != kv2.LockIndex ||
+		kv1.ModifyIndex != kv2.ModifyIndex {
+		return false
+	}
+	return true
+}
+
+func (c *ConsulClient) listenForKeyChange(watchContext context.Context, key string, ch chan *Event) {
+	logger.Debugw("start-watching-channel", log.Fields{"key": key, "channel": ch})
+
+	defer c.CloseWatch(key, ch)
+	duration := GetDuration(defaultKVGetTimeout)
+	kv := c.consul.KV()
+	var queryOptions consulapi.QueryOptions
+	queryOptions.WaitTime = duration
+
+	// Get the existing value, if any
+	previousKVPair, meta, err := kv.Get(key, &queryOptions)
+	if err != nil {
+		logger.Debug(err)
+	}
+	lastIndex := meta.LastIndex
+
+	// Wait for change.  Push any change onto the channel and keep waiting for new update
+	//var waitOptions consulapi.QueryOptions
+	var pair *consulapi.KVPair
+	//watchContext, _ := context.WithCancel(context.Background())
+	waitOptions := queryOptions.WithContext(watchContext)
+	for {
+		//waitOptions = consulapi.QueryOptions{WaitIndex: lastIndex}
+		waitOptions.WaitIndex = lastIndex
+		pair, meta, err = kv.Get(key, waitOptions)
+		select {
+		case <-watchContext.Done():
+			logger.Debug("done-event-received-exiting")
+			return
+		default:
+			if err != nil {
+				logger.Warnw("error-from-watch", log.Fields{"error": err})
+				ch <- NewEvent(CONNECTIONDOWN, key, []byte(""), -1)
+			} else {
+				logger.Debugw("index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
+			}
+		}
+		if err != nil {
+			logger.Debug(err)
+			// On error, block for 10 milliseconds to prevent endless loop
+			time.Sleep(10 * time.Millisecond)
+		} else if meta.LastIndex <= lastIndex {
+			logger.Info("no-index-change-or-negative")
+		} else {
+			logger.Debugw("update-received", log.Fields{"pair": pair})
+			if pair == nil {
+				ch <- NewEvent(DELETE, key, []byte(""), -1)
+			} else if !c.isKVEqual(pair, previousKVPair) {
+				// Push the change onto the channel if the data has changed
+				// For now just assume it's a PUT change
+				logger.Debugw("pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
+				ch <- NewEvent(PUT, pair.Key, pair.Value, -1)
+			}
+			previousKVPair = pair
+			lastIndex = meta.LastIndex
+		}
+	}
+}
+
+// Close closes the KV store client
+func (c *ConsulClient) Close() {
+	var writeOptions consulapi.WriteOptions
+	// Inform any goroutine it's time to say goodbye.
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	if c.doneCh != nil {
+		close(*c.doneCh)
+	}
+
+	// Clear the sessionID
+	if _, err := c.consul.Session().Destroy(c.sessionID, &writeOptions); err != nil {
+		logger.Errorw("error-closing-client", log.Fields{"error": err})
+	}
+}
+
+func (c *ConsulClient) AcquireLock(ctx context.Context, lockName string, timeout int) error {
+	return nil
+}
+
+func (c *ConsulClient) ReleaseLock(lockName string) error {
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
new file mode 100644
index 0000000..d38f0f6
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
@@ -0,0 +1,485 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	v3Client "go.etcd.io/etcd/clientv3"
+	v3Concurrency "go.etcd.io/etcd/clientv3/concurrency"
+	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+)
+
+// EtcdClient represents the Etcd KV store client
+type EtcdClient struct {
+	ectdAPI             *v3Client.Client
+	keyReservations     map[string]*v3Client.LeaseID
+	watchedChannels     sync.Map
+	keyReservationsLock sync.RWMutex
+	lockToMutexMap      map[string]*v3Concurrency.Mutex
+	lockToSessionMap    map[string]*v3Concurrency.Session
+	lockToMutexLock     sync.Mutex
+}
+
+// NewEtcdClient returns a new client for the Etcd KV store
+func NewEtcdClient(addr string, timeout int) (*EtcdClient, error) {
+	duration := GetDuration(timeout)
+
+	c, err := v3Client.New(v3Client.Config{
+		Endpoints:   []string{addr},
+		DialTimeout: duration,
+	})
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+
+	reservations := make(map[string]*v3Client.LeaseID)
+	lockMutexMap := make(map[string]*v3Concurrency.Mutex)
+	lockSessionMap := make(map[string]*v3Concurrency.Session)
+
+	return &EtcdClient{ectdAPI: c, keyReservations: reservations, lockToMutexMap: lockMutexMap,
+		lockToSessionMap: lockSessionMap}, nil
+}
+
+// IsConnectionUp returns whether the connection to the Etcd KV store is up.  If a timeout occurs then
+// it is assumed the connection is down or unreachable.
+func (c *EtcdClient) IsConnectionUp(ctx context.Context) bool {
+	// Let's try to get a non-existent key.  If the connection is up then there will be no error returned.
+	if _, err := c.Get(ctx, "non-existent-key"); err != nil {
+		return false
+	}
+	//cancel()
+	return true
+}
+
+// List returns an array of key-value pairs with key as a prefix.  Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
+	resp, err := c.ectdAPI.Get(ctx, key, v3Client.WithPrefix())
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+	m := make(map[string]*KVPair)
+	for _, ev := range resp.Kvs {
+		m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version)
+	}
+	return m, nil
+}
+
+// Get returns a key-value pair for a given key. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Get(ctx context.Context, key string) (*KVPair, error) {
+
+	resp, err := c.ectdAPI.Get(ctx, key)
+
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+	for _, ev := range resp.Kvs {
+		// Only one value is returned
+		return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version), nil
+	}
+	return nil, nil
+}
+
+// Put writes a key-value pair to the KV store.  Value can only be a string or []byte since the etcd API
+// accepts only a string as a value for a put operation. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Put(ctx context.Context, key string, value interface{}) error {
+
+	// Validate that we can convert value to a string as etcd API expects a string
+	var val string
+	var er error
+	if val, er = ToString(value); er != nil {
+		return fmt.Errorf("unexpected-type-%T", value)
+	}
+
+	var err error
+	// Check if there is already a lease for this key - if there is then use it, otherwise a PUT will make
+	// that KV key permanent instead of automatically removing it after a lease expiration
+	c.keyReservationsLock.RLock()
+	leaseID, ok := c.keyReservations[key]
+	c.keyReservationsLock.RUnlock()
+	if ok {
+		_, err = c.ectdAPI.Put(ctx, key, val, v3Client.WithLease(*leaseID))
+	} else {
+		_, err = c.ectdAPI.Put(ctx, key, val)
+	}
+
+	if err != nil {
+		switch err {
+		case context.Canceled:
+			logger.Warnw("context-cancelled", log.Fields{"error": err})
+		case context.DeadlineExceeded:
+			logger.Warnw("context-deadline-exceeded", log.Fields{"error": err})
+		case v3rpcTypes.ErrEmptyKey:
+			logger.Warnw("etcd-client-error", log.Fields{"error": err})
+		default:
+			logger.Warnw("bad-endpoints", log.Fields{"error": err})
+		}
+		return err
+	}
+	return nil
+}
+
+// Delete removes a key from the KV store. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Delete(ctx context.Context, key string) error {
+
+	// delete the key
+	if _, err := c.ectdAPI.Delete(ctx, key); err != nil {
+		logger.Errorw("failed-to-delete-key", log.Fields{"key": key, "error": err})
+		return err
+	}
+	logger.Debugw("key(s)-deleted", log.Fields{"key": key})
+	return nil
+}
+
+// Reserve is invoked to acquire a key and set it to a given value. Value can only be a string or []byte since
+// the etcd API accepts only a string.  Timeout defines how long the function will wait for a response.  TTL
+// defines how long that reservation is valid.  When TTL expires the key is unreserved by the KV store itself.
+// If the key is acquired then the value returned will be the value passed in.  If the key is already acquired
+// then the value assigned to that key will be returned.
+func (c *EtcdClient) Reserve(ctx context.Context, key string, value interface{}, ttl int64) (interface{}, error) {
+	// Validate that we can convert value to a string as etcd API expects a string
+	var val string
+	var er error
+	if val, er = ToString(value); er != nil {
+		return nil, fmt.Errorf("unexpected-type%T", value)
+	}
+
+	resp, err := c.ectdAPI.Grant(ctx, ttl)
+	if err != nil {
+		logger.Error(err)
+		return nil, err
+	}
+	// Register the lease id
+	c.keyReservationsLock.Lock()
+	c.keyReservations[key] = &resp.ID
+	c.keyReservationsLock.Unlock()
+
+	// Revoke lease if reservation is not successful
+	reservationSuccessful := false
+	defer func() {
+		if !reservationSuccessful {
+			if err = c.ReleaseReservation(context.Background(), key); err != nil {
+				logger.Error("cannot-release-lease")
+			}
+		}
+	}()
+
+	// Try to grab the Key with the above lease
+	c.ectdAPI.Txn(context.Background())
+	txn := c.ectdAPI.Txn(context.Background())
+	txn = txn.If(v3Client.Compare(v3Client.Version(key), "=", 0))
+	txn = txn.Then(v3Client.OpPut(key, val, v3Client.WithLease(resp.ID)))
+	txn = txn.Else(v3Client.OpGet(key))
+	result, er := txn.Commit()
+	if er != nil {
+		return nil, er
+	}
+
+	if !result.Succeeded {
+		// Verify whether we are already the owner of that Key
+		if len(result.Responses) > 0 &&
+			len(result.Responses[0].GetResponseRange().Kvs) > 0 {
+			kv := result.Responses[0].GetResponseRange().Kvs[0]
+			if string(kv.Value) == val {
+				reservationSuccessful = true
+				return value, nil
+			}
+			return kv.Value, nil
+		}
+	} else {
+		// Read the Key to ensure this is our Key
+		m, err := c.Get(ctx, key)
+		if err != nil {
+			return nil, err
+		}
+		if m != nil {
+			if m.Key == key && isEqual(m.Value, value) {
+				// My reservation is successful - register it.  For now, support is only for 1 reservation per key
+				// per session.
+				reservationSuccessful = true
+				return value, nil
+			}
+			// My reservation has failed.  Return the owner of that key
+			return m.Value, nil
+		}
+	}
+	return nil, nil
+}
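+
+// Illustrative usage sketch (not part of the upstream file): a caller reserves
+// a key with a TTL, checks whether the value it gets back is its own to know
+// whether it now owns the key, renews the reservation while working and
+// releases it when done. The key and value below are placeholders.
+//
+//	owner, err := kvClient.Reserve(ctx, "service/voltha/leader", "instance-1", 60)
+//	if err == nil && owner == "instance-1" {
+//		// we own the key; renew periodically and release at the end
+//		_ = kvClient.RenewReservation(ctx, "service/voltha/leader")
+//		_ = kvClient.ReleaseReservation(ctx, "service/voltha/leader")
+//	}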
+
+// ReleaseAllReservations releases all key reservations previously made (using Reserve API)
+func (c *EtcdClient) ReleaseAllReservations(ctx context.Context) error {
+	c.keyReservationsLock.Lock()
+	defer c.keyReservationsLock.Unlock()
+
+	for key, leaseID := range c.keyReservations {
+		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
+		if err != nil {
+			logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
+			return err
+		}
+		delete(c.keyReservations, key)
+	}
+	return nil
+}
+
+// ReleaseReservation releases reservation for a specific key.
+func (c *EtcdClient) ReleaseReservation(ctx context.Context, key string) error {
+	// Get the lease ID using the key
+	logger.Debugw("Release-reservation", log.Fields{"key": key})
+	var ok bool
+	var leaseID *v3Client.LeaseID
+	c.keyReservationsLock.Lock()
+	defer c.keyReservationsLock.Unlock()
+	if leaseID, ok = c.keyReservations[key]; !ok {
+		return nil
+	}
+
+	if leaseID != nil {
+		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
+		if err != nil {
+			logger.Error(err)
+			return err
+		}
+		delete(c.keyReservations, key)
+	}
+	return nil
+}
+
+// RenewReservation renews a reservation.  A reservation will go stale after the specified TTL (Time To Live)
+// period specified when reserving the key
+func (c *EtcdClient) RenewReservation(ctx context.Context, key string) error {
+	// Get the lease ID using the key
+	var ok bool
+	var leaseID *v3Client.LeaseID
+	c.keyReservationsLock.RLock()
+	leaseID, ok = c.keyReservations[key]
+	c.keyReservationsLock.RUnlock()
+
+	if !ok {
+		return errors.New("key-not-reserved")
+	}
+
+	if leaseID != nil {
+		_, err := c.ectdAPI.KeepAliveOnce(ctx, *leaseID)
+		if err != nil {
+			logger.Errorw("lease-may-have-expired", log.Fields{"error": err})
+			return err
+		}
+	} else {
+		return errors.New("lease-expired")
+	}
+	return nil
+}
+
+// Watch provides the watch capability on a given key.  It returns a channel onto which the caller needs to
+// listen to receive Events.
+func (c *EtcdClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
+	w := v3Client.NewWatcher(c.ectdAPI)
+	ctx, cancel := context.WithCancel(ctx)
+	var channel v3Client.WatchChan
+	if withPrefix {
+		channel = w.Watch(ctx, key, v3Client.WithPrefix())
+	} else {
+		channel = w.Watch(ctx, key)
+	}
+
+	// Create a new channel
+	ch := make(chan *Event, maxClientChannelBufferSize)
+
+	// Keep track of the created channels so they can be closed when required
+	channelMap := make(map[chan *Event]v3Client.Watcher)
+	channelMap[ch] = w
+
+	channelMaps := c.addChannelMap(key, channelMap)
+
+	// Log only the number of channels (rather than channelMaps itself) as the underlying logger cannot format the
+	// map of channels into a JSON format.
+	logger.Debugw("watched-channels", log.Fields{"len": len(channelMaps)})
+	// Launch a go routine to listen for updates
+	go c.listenForKeyChange(channel, ch, cancel)
+
+	return ch
+
+}
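+
+// Illustrative usage sketch (not part of the upstream file): callers typically
+// drain the returned channel in a goroutine and stop watching via CloseWatch
+// when events are no longer needed. The key below is a placeholder.
+//
+//	events := kvClient.Watch(ctx, "service/voltha/devices", true)
+//	go func() {
+//		for evt := range events {
+//			switch evt.EventType {
+//			case PUT:
+//				// handle created/updated key in evt.Key / evt.Value
+//			case DELETE:
+//				// handle removed key
+//			}
+//		}
+//	}()
+//	// ... later, when the watch is no longer needed
+//	kvClient.CloseWatch("service/voltha/devices", events)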
+
+func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]v3Client.Watcher) []map[chan *Event]v3Client.Watcher {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]v3Client.Watcher), channelMap)
+	} else {
+		channels = []map[chan *Event]v3Client.Watcher{channelMap}
+	}
+	c.watchedChannels.Store(key, channels)
+
+	return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]v3Client.Watcher {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]v3Client.Watcher)[:pos], channels.([]map[chan *Event]v3Client.Watcher)[pos+1:]...)
+		c.watchedChannels.Store(key, channels)
+	}
+
+	return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]v3Client.Watcher, bool) {
+	var channels interface{}
+	var exists bool
+
+	channels, exists = c.watchedChannels.Load(key)
+
+	if channels == nil {
+		return nil, exists
+	}
+
+	return channels.([]map[chan *Event]v3Client.Watcher), exists
+}
+
+// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
+// may be multiple listeners on the same key.  The previously created channel serves as a key
+func (c *EtcdClient) CloseWatch(key string, ch chan *Event) {
+	// Get the array of channels mapping
+	var watchedChannels []map[chan *Event]v3Client.Watcher
+	var ok bool
+
+	if watchedChannels, ok = c.getChannelMaps(key); !ok {
+		logger.Warnw("key-has-no-watched-channels", log.Fields{"key": key})
+		return
+	}
+	// Look for the channels
+	var pos = -1
+	for i, chMap := range watchedChannels {
+		if t, ok := chMap[ch]; ok {
+			logger.Debug("channel-found")
+			// Close the etcd watcher before the client channel.  This should close the etcd channel as well
+			if err := t.Close(); err != nil {
+				logger.Errorw("watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+			}
+			pos = i
+			break
+		}
+	}
+
+	channelMaps, _ := c.getChannelMaps(key)
+	// Remove that entry if present
+	if pos >= 0 {
+		channelMaps = c.removeChannelMap(key, pos)
+	}
+	logger.Infow("watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+}
+
+func (c *EtcdClient) listenForKeyChange(channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+	logger.Debug("start-listening-on-channel ...")
+	defer cancel()
+	defer close(ch)
+	for resp := range channel {
+		for _, ev := range resp.Events {
+			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
+		}
+	}
+	logger.Debug("stop-listening-on-channel ...")
+}
+
+func getEventType(event *v3Client.Event) int {
+	switch event.Type {
+	case v3Client.EventTypePut:
+		return PUT
+	case v3Client.EventTypeDelete:
+		return DELETE
+	}
+	return UNKNOWN
+}
+
+// Close closes the KV store client
+func (c *EtcdClient) Close() {
+	if err := c.ectdAPI.Close(); err != nil {
+		logger.Errorw("error-closing-client", log.Fields{"error": err})
+	}
+}
+
+func (c *EtcdClient) addLockName(lockName string, lock *v3Concurrency.Mutex, session *v3Concurrency.Session) {
+	c.lockToMutexLock.Lock()
+	defer c.lockToMutexLock.Unlock()
+	c.lockToMutexMap[lockName] = lock
+	c.lockToSessionMap[lockName] = session
+}
+
+func (c *EtcdClient) deleteLockName(lockName string) {
+	c.lockToMutexLock.Lock()
+	defer c.lockToMutexLock.Unlock()
+	delete(c.lockToMutexMap, lockName)
+	delete(c.lockToSessionMap, lockName)
+}
+
+func (c *EtcdClient) getLock(lockName string) (*v3Concurrency.Mutex, *v3Concurrency.Session) {
+	c.lockToMutexLock.Lock()
+	defer c.lockToMutexLock.Unlock()
+	var lock *v3Concurrency.Mutex
+	var session *v3Concurrency.Session
+	if l, exist := c.lockToMutexMap[lockName]; exist {
+		lock = l
+	}
+	if s, exist := c.lockToSessionMap[lockName]; exist {
+		session = s
+	}
+	return lock, session
+}
+
+func (c *EtcdClient) AcquireLock(ctx context.Context, lockName string, timeout int) error {
+	session, _ := v3Concurrency.NewSession(c.ectdAPI, v3Concurrency.WithContext(ctx))
+	mu := v3Concurrency.NewMutex(session, "/devicelock_"+lockName)
+	if err := mu.Lock(context.Background()); err != nil {
+		//cancel()
+		return err
+	}
+	c.addLockName(lockName, mu, session)
+	return nil
+}
+
+func (c *EtcdClient) ReleaseLock(lockName string) error {
+	lock, session := c.getLock(lockName)
+	var err error
+	if lock != nil {
+		if e := lock.Unlock(context.Background()); e != nil {
+			err = e
+		}
+	}
+	if session != nil {
+		if e := session.Close(); e != nil {
+			err = e
+		}
+	}
+	c.deleteLockName(lockName)
+
+	return err
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/kvutils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/kvutils.go
new file mode 100644
index 0000000..cf9a95c
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/kvutils.go
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"fmt"
+	"time"
+)
+
+// GetDuration converts a timeout value from int to duration.  If the timeout value is
+// either not set or negative then the default KV timeout (configurable) is used.
+func GetDuration(timeout int) time.Duration {
+	if timeout <= 0 {
+		return defaultKVGetTimeout * time.Second
+	}
+	return time.Duration(timeout) * time.Second
+}
+
+// ToString converts an interface value to a string.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToString(value interface{}) (string, error) {
+	switch t := value.(type) {
+	case []byte:
+		return string(value.([]byte)), nil
+	case string:
+		return value.(string), nil
+	default:
+		return "", fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+// ToByte converts an interface value to a []byte.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToByte(value interface{}) ([]byte, error) {
+	switch t := value.(type) {
+	case []byte:
+		return value.([]byte), nil
+	case string:
+		return []byte(value.(string)), nil
+	default:
+		return nil, fmt.Errorf("unexpected-type-%T", t)
+	}
+}
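+
+// Illustrative examples (not part of the upstream file) of how these helpers
+// behave:
+//
+//	GetDuration(0)                  // returns the default KV timeout (defaultKVGetTimeout seconds)
+//	GetDuration(3)                  // returns 3 * time.Second
+//	s, _ := ToString([]byte("abc")) // s == "abc"
+//	b, _ := ToByte("abc")           // b == []byte("abc")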
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
new file mode 100644
index 0000000..47fa3fb
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
@@ -0,0 +1,798 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//Package log provides a structured Logger interface implemented using zap logger. It provides the following capabilities:
+//1. Package level logging - a go package can register itself (AddPackage) and have a logger created for that package.
+//2. Dynamic log level change - for all registered packages (SetAllLogLevel)
+//3. Dynamic log level change - for a given package (SetPackageLogLevel)
+//4. Provides a default logger for unregistered packages
+//5. Allow key-value pairs to be added to a logger (UpdateLogger) or all loggers (UpdateAllLoggers) at run time
+//6. Add to the log output the location where the log was invoked (filename.functionname.linenumber)
+//
+// Using package-level logging (recommended approach).  In the examples below, log refers to this log package.
+// 1.  In the appropriate package add the following in the init section of the package.  The log level can be changed
+// and any number of default fields can be added as well. The log level specifies the lowest log level that will be
+// in the output while the fields will be automatically added to all log printouts.
+//
+//	log.AddPackage(log.JSON, log.WarnLevel, log.Fields{"anyFieldName": "any value"})
+//
+// 2. In the calling package, just invoke any of the publicly available functions of the logger.  Here is an example
+// to write an Info log with additional fields:
+//
+//	log.Infow("An example", log.Fields{"myStringOutput": "output", "myIntOutput": 2})
+//
+// 3. To dynamically change the log level, you can use 1) SetLogLevel from inside your package or 2) SetPackageLogLevel
+// from anywhere or 3) SetAllLogLevel from anywhere.
+//
+
+package log
+
+import (
+	"errors"
+	"fmt"
+	zp "go.uber.org/zap"
+	zc "go.uber.org/zap/zapcore"
+	"path"
+	"runtime"
+	"strings"
+)
+
+type LogLevel int8
+
+const (
+	// DebugLevel logs a message at debug level
+	DebugLevel = LogLevel(iota)
+	// InfoLevel logs a message at info level
+	InfoLevel
+	// WarnLevel logs a message at warning level
+	WarnLevel
+	// ErrorLevel logs a message at error level
+	ErrorLevel
+	// FatalLevel logs a message, then calls os.Exit(1).
+	FatalLevel
+)
+
+// CONSOLE formats the log for the console, mostly used during development
+const CONSOLE = "console"
+
+// JSON formats the log using json format, mostly used by an automated logging system consumption
+const JSON = "json"
+
+// Logger represents an abstract logging interface.  Any logging implementation used
+// will need to abide by this interface
+type Logger interface {
+	Debug(...interface{})
+	Debugln(...interface{})
+	Debugf(string, ...interface{})
+	Debugw(string, Fields)
+
+	Info(...interface{})
+	Infoln(...interface{})
+	Infof(string, ...interface{})
+	Infow(string, Fields)
+
+	Warn(...interface{})
+	Warnln(...interface{})
+	Warnf(string, ...interface{})
+	Warnw(string, Fields)
+
+	Error(...interface{})
+	Errorln(...interface{})
+	Errorf(string, ...interface{})
+	Errorw(string, Fields)
+
+	Fatal(...interface{})
+	Fatalln(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalw(string, Fields)
+
+	With(Fields) Logger
+
+	// The following are added to be able to use this logger as a gRPC LoggerV2 if needed
+	//
+	Warning(...interface{})
+	Warningln(...interface{})
+	Warningf(string, ...interface{})
+
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l LogLevel) bool
+
+	//Returns the log level of this specific logger
+	GetLogLevel() LogLevel
+}
+
+// Fields is used as key-value pairs for structured logging
+type Fields map[string]interface{}
+
+var defaultLogger *logger
+var cfg zp.Config
+
+var loggers map[string]*logger
+var cfgs map[string]zp.Config
+
+type logger struct {
+	log         *zp.SugaredLogger
+	parent      *zp.Logger
+	packageName string
+}
+
+func logLevelToAtomicLevel(l LogLevel) zp.AtomicLevel {
+	switch l {
+	case DebugLevel:
+		return zp.NewAtomicLevelAt(zc.DebugLevel)
+	case InfoLevel:
+		return zp.NewAtomicLevelAt(zc.InfoLevel)
+	case WarnLevel:
+		return zp.NewAtomicLevelAt(zc.WarnLevel)
+	case ErrorLevel:
+		return zp.NewAtomicLevelAt(zc.ErrorLevel)
+	case FatalLevel:
+		return zp.NewAtomicLevelAt(zc.FatalLevel)
+	}
+	return zp.NewAtomicLevelAt(zc.ErrorLevel)
+}
+
+func logLevelToLevel(l LogLevel) zc.Level {
+	switch l {
+	case DebugLevel:
+		return zc.DebugLevel
+	case InfoLevel:
+		return zc.InfoLevel
+	case WarnLevel:
+		return zc.WarnLevel
+	case ErrorLevel:
+		return zc.ErrorLevel
+	case FatalLevel:
+		return zc.FatalLevel
+	}
+	return zc.ErrorLevel
+}
+
+func levelToLogLevel(l zc.Level) LogLevel {
+	switch l {
+	case zc.DebugLevel:
+		return DebugLevel
+	case zc.InfoLevel:
+		return InfoLevel
+	case zc.WarnLevel:
+		return WarnLevel
+	case zc.ErrorLevel:
+		return ErrorLevel
+	case zc.FatalLevel:
+		return FatalLevel
+	}
+	return ErrorLevel
+}
+
+func StringToLogLevel(l string) (LogLevel, error) {
+	switch strings.ToUpper(l) {
+	case "DEBUG":
+		return DebugLevel, nil
+	case "INFO":
+		return InfoLevel, nil
+	case "WARN":
+		return WarnLevel, nil
+	case "ERROR":
+		return ErrorLevel, nil
+	case "FATAL":
+		return FatalLevel, nil
+	}
+	return 0, errors.New("Given LogLevel is invalid : " + l)
+}
+
+func LogLevelToString(l LogLevel) (string, error) {
+	switch l {
+	case DebugLevel:
+		return "DEBUG", nil
+	case InfoLevel:
+		return "INFO", nil
+	case WarnLevel:
+		return "WARN", nil
+	case ErrorLevel:
+		return "ERROR", nil
+	case FatalLevel:
+		return "FATAL", nil
+	}
+	return "", fmt.Errorf("Given LogLevel is invalid %d", l)
+}
+
+func getDefaultConfig(outputType string, level LogLevel, defaultFields Fields) zp.Config {
+	return zp.Config{
+		Level:            logLevelToAtomicLevel(level),
+		Encoding:         outputType,
+		Development:      true,
+		OutputPaths:      []string{"stdout"},
+		ErrorOutputPaths: []string{"stderr"},
+		InitialFields:    defaultFields,
+		EncoderConfig: zc.EncoderConfig{
+			LevelKey:       "level",
+			MessageKey:     "msg",
+			TimeKey:        "ts",
+			CallerKey:      "caller",
+			StacktraceKey:  "stacktrace",
+			LineEnding:     zc.DefaultLineEnding,
+			EncodeLevel:    zc.LowercaseLevelEncoder,
+			EncodeTime:     zc.ISO8601TimeEncoder,
+			EncodeDuration: zc.SecondsDurationEncoder,
+			EncodeCaller:   zc.ShortCallerEncoder,
+		},
+	}
+}
+
+// SetDefaultLogger needs to be invoked before the logger API can be invoked.  This function
+// initializes the default logger (zap's sugaredlogger)
+func SetDefaultLogger(outputType string, level LogLevel, defaultFields Fields) (Logger, error) {
+	// Build a custom config using zap
+	cfg = getDefaultConfig(outputType, level, defaultFields)
+
+	l, err := cfg.Build(zp.AddCallerSkip(1))
+	if err != nil {
+		return nil, err
+	}
+
+	defaultLogger = &logger{
+		log:    l.Sugar(),
+		parent: l,
+	}
+
+	return defaultLogger, nil
+}
+
+// AddPackage registers a package to the log map.  Each package gets its own logger which allows
+// its config (loglevel) to be changed dynamically without interacting with the other packages.
+// outputType is JSON, level is the lowest level log to output with this logger and defaultFields is a map of
+// key-value pairs to always add to the output.
+// Note: AddPackage also returns a reference to the actual logger.  If a calling package uses this reference directly
+// instead of using the publicly available functions in this log package then a number of functionalities will not
+// be available to it, notably log tracing with filename.functionname.linenumber annotation.
+//
+// The pkgNames parameter should be used for testing only, as this function otherwise detects the caller's package.
+func AddPackage(outputType string, level LogLevel, defaultFields Fields, pkgNames ...string) (Logger, error) {
+	if cfgs == nil {
+		cfgs = make(map[string]zp.Config)
+	}
+	if loggers == nil {
+		loggers = make(map[string]*logger)
+	}
+
+	var pkgName string
+	for _, name := range pkgNames {
+		pkgName = name
+		break
+	}
+	if pkgName == "" {
+		pkgName, _, _, _ = getCallerInfo()
+	}
+
+	if _, exist := loggers[pkgName]; exist {
+		return loggers[pkgName], nil
+	}
+
+	cfgs[pkgName] = getDefaultConfig(outputType, level, defaultFields)
+
+	l, err := cfgs[pkgName].Build(zp.AddCallerSkip(1))
+	if err != nil {
+		return nil, err
+	}
+
+	loggers[pkgName] = &logger{
+		log:         l.Sugar(),
+		parent:      l,
+		packageName: pkgName,
+	}
+	return loggers[pkgName], nil
+}
+
+// UpdateAllLoggers creates new loggers for all registered packages with the defaultFields.
+func UpdateAllLoggers(defaultFields Fields) error {
+	for pkgName, cfg := range cfgs {
+		for k, v := range defaultFields {
+			if cfg.InitialFields == nil {
+				cfg.InitialFields = make(map[string]interface{})
+			}
+			cfg.InitialFields[k] = v
+		}
+		l, err := cfg.Build(zp.AddCallerSkip(1))
+		if err != nil {
+			return err
+		}
+
+		// Update the existing zap logger instance
+		loggers[pkgName].log = l.Sugar()
+		loggers[pkgName].parent = l
+	}
+	return nil
+}
+
+// GetPackageNames returns a list of all packages that have individually-configured loggers
+func GetPackageNames() []string {
+	i := 0
+	keys := make([]string, len(loggers))
+	for k := range loggers {
+		keys[i] = k
+		i++
+	}
+	return keys
+}
+
+// UpdateLogger updates the logger associated with a caller's package with supplied defaultFields
+func UpdateLogger(defaultFields Fields) error {
+	pkgName, _, _, _ := getCallerInfo()
+	if _, exist := loggers[pkgName]; !exist {
+		return fmt.Errorf("package-%s-not-registered", pkgName)
+	}
+
+	// Build a new logger
+	if _, exist := cfgs[pkgName]; !exist {
+		return fmt.Errorf("config-%s-not-registered", pkgName)
+	}
+
+	cfg := cfgs[pkgName]
+	for k, v := range defaultFields {
+		if cfg.InitialFields == nil {
+			cfg.InitialFields = make(map[string]interface{})
+		}
+		cfg.InitialFields[k] = v
+	}
+	l, err := cfg.Build(zp.AddCallerSkip(1))
+	if err != nil {
+		return err
+	}
+
+	// Update the existing zap logger instance
+	loggers[pkgName].log = l.Sugar()
+	loggers[pkgName].parent = l
+
+	return nil
+}
+
+func setLevel(cfg zp.Config, level LogLevel) {
+	switch level {
+	case DebugLevel:
+		cfg.Level.SetLevel(zc.DebugLevel)
+	case InfoLevel:
+		cfg.Level.SetLevel(zc.InfoLevel)
+	case WarnLevel:
+		cfg.Level.SetLevel(zc.WarnLevel)
+	case ErrorLevel:
+		cfg.Level.SetLevel(zc.ErrorLevel)
+	case FatalLevel:
+		cfg.Level.SetLevel(zc.FatalLevel)
+	default:
+		cfg.Level.SetLevel(zc.ErrorLevel)
+	}
+}
+
+//SetPackageLogLevel dynamically sets the log level of a given package to level.  This is typically invoked at an
+// application level during debugging
+func SetPackageLogLevel(packageName string, level LogLevel) {
+	// Get proper config
+	if cfg, ok := cfgs[packageName]; ok {
+		setLevel(cfg, level)
+	}
+}
+
+//SetAllLogLevel sets the log level of all registered packages to level
+func SetAllLogLevel(level LogLevel) {
+	// Get proper config
+	for _, cfg := range cfgs {
+		setLevel(cfg, level)
+	}
+}
+
+//GetPackageLogLevel returns the current log level of a package.
+func GetPackageLogLevel(packageName ...string) (LogLevel, error) {
+	var name string
+	if len(packageName) == 1 {
+		name = packageName[0]
+	} else {
+		name, _, _, _ = getCallerInfo()
+	}
+	if cfg, ok := cfgs[name]; ok {
+		return levelToLogLevel(cfg.Level.Level()), nil
+	}
+	return 0, fmt.Errorf("unknown-package-%s", name)
+}
+
+//GetDefaultLogLevel gets the log level used for packages that don't have specific loggers
+func GetDefaultLogLevel() LogLevel {
+	return levelToLogLevel(cfg.Level.Level())
+}
+
+//SetLogLevel sets the log level for the logger corresponding to the caller's package
+func SetLogLevel(level LogLevel) error {
+	pkgName, _, _, _ := getCallerInfo()
+	if _, exist := cfgs[pkgName]; !exist {
+		return fmt.Errorf("unregistered-package-%s", pkgName)
+	}
+	cfg := cfgs[pkgName]
+	setLevel(cfg, level)
+	return nil
+}
+
+//SetDefaultLogLevel sets the log level used for packages that don't have specific loggers
+func SetDefaultLogLevel(level LogLevel) {
+	setLevel(cfg, level)
+}
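+
+// Illustrative sketch (not part of the upstream file): run-time log level
+// changes are typically driven from an application-level handler, for example
+// when a configuration update is received. The package name below is a
+// placeholder.
+//
+//	SetAllLogLevel(DebugLevel)                            // every registered package
+//	SetPackageLogLevel("github.com/x/y/somepkg", WarnLevel) // one package only
+//	if lvl, err := GetPackageLogLevel("github.com/x/y/somepkg"); err == nil {
+//		_ = lvl
+//	}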
+
+// CleanUp flushes any buffered log entries. Applications should take care to call
+// CleanUp before exiting.
+func CleanUp() error {
+	for _, logger := range loggers {
+		if logger != nil {
+			if logger.parent != nil {
+				if err := logger.parent.Sync(); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	if defaultLogger != nil {
+		if defaultLogger.parent != nil {
+			if err := defaultLogger.parent.Sync(); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func getCallerInfo() (string, string, string, int) {
+	// Since the caller of a log function is one stack frame above log.go (higher in the call stack),
+	// first look for the last log.go frame and then grab the caller info one level higher.
+	maxLevel := 3
+	skiplevel := 3 // Level with the most empirical success to see the last log.go stack frame.
+	pc := make([]uintptr, maxLevel)
+	n := runtime.Callers(skiplevel, pc)
+	packageName := ""
+	funcName := ""
+	fileName := ""
+	var line int
+	if n == 0 {
+		return packageName, fileName, funcName, line
+	}
+	frames := runtime.CallersFrames(pc[:n])
+	var frame runtime.Frame
+	var foundFrame runtime.Frame
+	more := true
+	for more {
+		frame, more = frames.Next()
+		_, fileName = path.Split(frame.File)
+		if fileName != "log.go" {
+			foundFrame = frame // First frame after log.go in the frame stack
+			break
+		}
+	}
+	parts := strings.Split(foundFrame.Function, ".")
+	pl := len(parts)
+	if pl >= 2 {
+		funcName = parts[pl-1]
+		if parts[pl-2][0] == '(' {
+			packageName = strings.Join(parts[0:pl-2], ".")
+		} else {
+			packageName = strings.Join(parts[0:pl-1], ".")
+		}
+	}
+
+	if strings.HasSuffix(packageName, ".init") {
+		packageName = strings.TrimSuffix(packageName, ".init")
+	}
+
+	if strings.HasSuffix(fileName, ".go") {
+		fileName = strings.TrimSuffix(fileName, ".go")
+	}
+
+	return packageName, fileName, funcName, foundFrame.Line
+}
+
+func getPackageLevelSugaredLogger() *zp.SugaredLogger {
+	pkgName, fileName, funcName, line := getCallerInfo()
+	if _, exist := loggers[pkgName]; exist {
+		return loggers[pkgName].log.With("caller", fmt.Sprintf("%s.%s:%d", fileName, funcName, line))
+	}
+	return defaultLogger.log.With("caller", fmt.Sprintf("%s.%s:%d", fileName, funcName, line))
+}
+
+func getPackageLevelLogger() Logger {
+	pkgName, _, _, _ := getCallerInfo()
+	if _, exist := loggers[pkgName]; exist {
+		return loggers[pkgName]
+	}
+	return defaultLogger
+}
+
+func serializeMap(fields Fields) []interface{} {
+	data := make([]interface{}, len(fields)*2)
+	i := 0
+	for k, v := range fields {
+		data[i] = k
+		data[i+1] = v
+		i = i + 2
+	}
+	return data
+}
+
+// With returns a logger initialized with the key-value pairs
+func (l logger) With(keysAndValues Fields) Logger {
+	return logger{log: l.log.With(serializeMap(keysAndValues)...), parent: l.parent}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func (l logger) Debug(args ...interface{}) {
+	l.log.Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger with a line feed. Default in any case.
+func (l logger) Debugln(args ...interface{}) {
+	l.log.Debug(args...)
+}
+
+// Debugf logs a formatted message at level Debug on the standard logger.
+func (l logger) Debugf(format string, args ...interface{}) {
+	l.log.Debugf(format, args...)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Debugw(msg string, keysAndValues Fields) {
+	l.log.Debugw(msg, serializeMap(keysAndValues)...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func (l logger) Info(args ...interface{}) {
+	l.log.Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger, ending with a line feed (the default behavior in any case).
+func (l logger) Infoln(args ...interface{}) {
+	l.log.Info(args...)
+	//msg := fmt.Sprintln(args...)
+	//l.sourced().Info(msg[:len(msg)-1])
+}
+
+// Infof logs a message at level Info on the standard logger.
+func (l logger) Infof(format string, args ...interface{}) {
+	l.log.Infof(format, args...)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Infow(msg string, keysAndValues Fields) {
+	l.log.Infow(msg, serializeMap(keysAndValues)...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func (l logger) Warn(args ...interface{}) {
+	l.log.Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger, ending with a line feed (the default behavior in any case).
+func (l logger) Warnln(args ...interface{}) {
+	l.log.Warn(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func (l logger) Warnf(format string, args ...interface{}) {
+	l.log.Warnf(format, args...)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Warnw(msg string, keysAndValues Fields) {
+	l.log.Warnw(msg, serializeMap(keysAndValues)...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func (l logger) Error(args ...interface{}) {
+	l.log.Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger, ending with a line feed (the default behavior in any case).
+func (l logger) Errorln(args ...interface{}) {
+	l.log.Error(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func (l logger) Errorf(format string, args ...interface{}) {
+	l.log.Errorf(format, args...)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Errorw(msg string, keysAndValues Fields) {
+	l.log.Errorw(msg, serializeMap(keysAndValues)...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func (l logger) Fatal(args ...interface{}) {
+	l.log.Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger, ending with a line feed (the default behavior in any case).
+func (l logger) Fatalln(args ...interface{}) {
+	l.log.Fatal(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func (l logger) Fatalf(format string, args ...interface{}) {
+	l.log.Fatalf(format, args...)
+}
+
+// Fatalw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l logger) Fatalw(msg string, keysAndValues Fields) {
+	l.log.Fatalw(msg, serializeMap(keysAndValues)...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func (l logger) Warning(args ...interface{}) {
+	l.log.Warn(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger, ending with a line feed (the default behavior in any case).
+func (l logger) Warningln(args ...interface{}) {
+	l.log.Warn(args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func (l logger) Warningf(format string, args ...interface{}) {
+	l.log.Warnf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (l logger) V(level LogLevel) bool {
+	return l.parent.Core().Enabled(logLevelToLevel(level))
+}
+
+// GetLogLevel returns the current level of the logger
+func (l logger) GetLogLevel() LogLevel {
+	return levelToLogLevel(cfgs[l.packageName].Level.Level())
+}
+
+// With returns a logger initialized with the key-value pairs
+func With(keysAndValues Fields) Logger {
+	return logger{log: getPackageLevelSugaredLogger().With(serializeMap(keysAndValues)...), parent: defaultLogger.parent}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+	getPackageLevelSugaredLogger().Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	getPackageLevelSugaredLogger().Debug(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+	getPackageLevelSugaredLogger().Debugf(format, args...)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Debugw(msg string, keysAndValues Fields) {
+	getPackageLevelSugaredLogger().Debugw(msg, serializeMap(keysAndValues)...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+	getPackageLevelSugaredLogger().Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	getPackageLevelSugaredLogger().Info(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	getPackageLevelSugaredLogger().Infof(format, args...)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Infow(msg string, keysAndValues Fields) {
+	getPackageLevelSugaredLogger().Infow(msg, serializeMap(keysAndValues)...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+	getPackageLevelSugaredLogger().Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	getPackageLevelSugaredLogger().Warn(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	getPackageLevelSugaredLogger().Warnf(format, args...)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Warnw(msg string, keysAndValues Fields) {
+	getPackageLevelSugaredLogger().Warnw(msg, serializeMap(keysAndValues)...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+	getPackageLevelSugaredLogger().Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	getPackageLevelSugaredLogger().Error(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	getPackageLevelSugaredLogger().Errorf(format, args...)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Errorw(msg string, keysAndValues Fields) {
+	getPackageLevelSugaredLogger().Errorw(msg, serializeMap(keysAndValues)...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+	getPackageLevelSugaredLogger().Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+	getPackageLevelSugaredLogger().Fatal(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+	getPackageLevelSugaredLogger().Fatalf(format, args...)
+}
+
+// Fatalw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func Fatalw(msg string, keysAndValues Fields) {
+	getPackageLevelSugaredLogger().Fatalw(msg, serializeMap(keysAndValues)...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+	getPackageLevelSugaredLogger().Warn(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	getPackageLevelSugaredLogger().Warn(args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	getPackageLevelSugaredLogger().Warnf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func V(level LogLevel) bool {
+	return getPackageLevelLogger().V(level)
+}
+
+// GetLogLevel returns the log level of the invoking package
+func GetLogLevel() LogLevel {
+	return getPackageLevelLogger().GetLogLevel()
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go
new file mode 100644
index 0000000..79fefc5
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go
@@ -0,0 +1,1337 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package ponresourcemanager
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+
+	bitmap "github.com/boljen/go-bitmap"
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	tp "github.com/opencord/voltha-lib-go/v3/pkg/techprofile"
+)
+
+const (
+	//Constants to identify resource pool
+	UNI_ID     = "UNI_ID"
+	ONU_ID     = "ONU_ID"
+	ALLOC_ID   = "ALLOC_ID"
+	GEMPORT_ID = "GEMPORT_ID"
+	FLOW_ID    = "FLOW_ID"
+
+	//Constants for passing command line arguments
+	OLT_MODEL_ARG = "--olt_model"
+	PATH_PREFIX   = "service/voltha/resource_manager/{%s}"
+
+	/*The path under which configuration data is stored is defined as technology/device agnostic.
+	  That means the path does not include any specific technology/device variable. Using technology/device
+	  agnostic path also makes northbound applications, that need to write to this path,
+	  technology/device agnostic.
+
+	  Default kv client of PonResourceManager reads from/writes to PATH_PREFIX defined above.
+	  That is why, an additional kv client (named KVStoreForConfig) is defined to read from the config path.
+	*/
+	PATH_PREFIX_FOR_CONFIG = "service/voltha/resource_manager/config"
+	/*The resource ranges for a given device model should be placed
+	  at 'resource_manager/<technology>/resource_ranges/<olt_model_type>'
+	  path on the KV store.
+	  If Resource Range parameters are to be read from the external KV store,
+	  they are expected to be stored in the following format.
+	  Note: All parameters are MANDATORY for now.
+	  Constants used as keys to reference the resource range parameters from
+	  an external KV store.
+	*/
+	UNI_ID_START_IDX      = "uni_id_start"
+	UNI_ID_END_IDX        = "uni_id_end"
+	ONU_ID_START_IDX      = "onu_id_start"
+	ONU_ID_END_IDX        = "onu_id_end"
+	ONU_ID_SHARED_IDX     = "onu_id_shared"
+	ALLOC_ID_START_IDX    = "alloc_id_start"
+	ALLOC_ID_END_IDX      = "alloc_id_end"
+	ALLOC_ID_SHARED_IDX   = "alloc_id_shared"
+	GEMPORT_ID_START_IDX  = "gemport_id_start"
+	GEMPORT_ID_END_IDX    = "gemport_id_end"
+	GEMPORT_ID_SHARED_IDX = "gemport_id_shared"
+	FLOW_ID_START_IDX     = "flow_id_start"
+	FLOW_ID_END_IDX       = "flow_id_end"
+	FLOW_ID_SHARED_IDX    = "flow_id_shared"
+	NUM_OF_PON_PORT       = "pon_ports"
+
+	/*
+	   The KV store backend is initialized with a path prefix and we need to
+	   provide only the suffix.
+	*/
+	PON_RESOURCE_RANGE_CONFIG_PATH = "resource_ranges/%s"
+
+	//resource path suffix
+	//Path on the KV store for storing alloc id ranges and resource pool for a given interface
+	//Format: <device_id>/alloc_id_pool/<pon_intf_id>
+	ALLOC_ID_POOL_PATH = "{%s}/alloc_id_pool/{%d}"
+	//Path on the KV store for storing gemport id ranges and resource pool for a given interface
+	//Format: <device_id>/gemport_id_pool/<pon_intf_id>
+	GEMPORT_ID_POOL_PATH = "{%s}/gemport_id_pool/{%d}"
+	//Path on the KV store for storing onu id ranges and resource pool for a given interface
+	//Format: <device_id>/onu_id_pool/<pon_intf_id>
+	ONU_ID_POOL_PATH = "{%s}/onu_id_pool/{%d}"
+	//Path on the KV store for storing flow id ranges and resource pool for a given interface
+	//Format: <device_id>/flow_id_pool/<pon_intf_id>
+	FLOW_ID_POOL_PATH = "{%s}/flow_id_pool/{%d}"
+
+	//Path on the KV store for storing list of alloc IDs for a given ONU
+	//Format: <device_id>/<(pon_intf_id, onu_id)>/alloc_ids
+	ALLOC_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/alloc_ids"
+
+	//Path on the KV store for storing list of gemport IDs for a given ONU
+	//Format: <device_id>/<(pon_intf_id, onu_id)>/gemport_ids
+	GEMPORT_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/gemport_ids"
+
+	//Path on the KV store for storing list of Flow IDs for a given ONU
+	//Format: <device_id>/<(pon_intf_id, onu_id)>/flow_ids
+	FLOW_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/flow_ids"
+
+	//Flow Id info: Use to store more metadata associated with the flow_id
+	//Format: <device_id>/<(pon_intf_id, onu_id)>/flow_id_info/<flow_id>
+	FLOW_ID_INFO_PATH = "{%s}/{%s}/flow_id_info/{%d}"
+
+	//path on the kvstore to store onugem info map
+	//format: <device-id>/onu_gem_info/<intfid>
+	ONU_GEM_INFO_PATH = "{%s}/onu_gem_info/{%d}" // onu_gem/<(intfid)>
+
+	//Constants for internal usage.
+	PON_INTF_ID     = "pon_intf_id"
+	START_IDX       = "start_idx"
+	END_IDX         = "end_idx"
+	POOL            = "pool"
+	NUM_OF_PON_INTF = 16
+
+	KVSTORE_RETRY_TIMEOUT = 5
+	//Path on the KV store for storing reserved gem ports
+	//Format: reserved_gemport_ids
+	RESERVED_GEMPORT_IDS_PATH = "reserved_gemport_ids"
+)
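For orientation (an illustrative sketch, not part of the vendored file): the pool-path templates above are expanded with fmt.Sprintf and stored under the backend's PathPrefix (service/voltha/resource_manager/{technology}). The device ID below is hypothetical.

package main

import "fmt"

func main() {
	// Same format string as FLOW_ID_POOL_PATH above.
	const flowIDPoolPath = "{%s}/flow_id_pool/{%d}"
	deviceID := "0001941bd45e71d8" // hypothetical device ID
	var intfID uint32
	fmt.Println(fmt.Sprintf(flowIDPoolPath, deviceID, intfID))
	// Prints: {0001941bd45e71d8}/flow_id_pool/{0}
}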
+
+//type ResourceTypeIndex string
+//type ResourceType string
+
+type PONResourceManager struct {
+	//Implements APIs to initialize/allocate/release alloc/gemport/onu IDs.
+	Technology       string
+	DeviceType       string
+	DeviceID         string
+	Backend          string // ETCD, or consul
+	Host             string // host ip of the KV store
+	Port             int    // port number for the KV store
+	OLTModel         string
+	KVStore          *db.Backend
+	KVStoreForConfig *db.Backend
+	TechProfileMgr   tp.TechProfileIf // create object of *tp.TechProfileMgr
+
+	// Below attribute, pon_resource_ranges, should be initialized
+	// by reading from KV store.
+	PonResourceRanges  map[string]interface{}
+	SharedResourceMgrs map[string]*PONResourceManager
+	SharedIdxByType    map[string]string
+	IntfIDs            []uint32 // list of pon interface IDs
+	Globalorlocal      string
+}
+
+func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
+	log.Infow("kv-store-type", log.Fields{"store": storeType})
+	switch storeType {
+	case "consul":
+		return kvstore.NewConsulClient(address, timeout)
+	case "etcd":
+		return kvstore.NewEtcdClient(address, timeout)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
+func SetKVClient(Technology string, Backend string, Host string, Port int, configClient bool) *db.Backend {
+	addr := Host + ":" + strconv.Itoa(Port)
+	// TODO: Make sure a direct call to NewBackend works with the backend; currently there is some
+	// issue between the kv store and the backend, and the core does not call NewBackend directly
+	kvClient, err := newKVClient(Backend, addr, KVSTORE_RETRY_TIMEOUT)
+	if err != nil {
+		log.Fatalw("Failed to init KV client\n", log.Fields{"err": err})
+		return nil
+	}
+
+	var pathPrefix string
+	if configClient {
+		pathPrefix = PATH_PREFIX_FOR_CONFIG
+	} else {
+		pathPrefix = fmt.Sprintf(PATH_PREFIX, Technology)
+	}
+
+	kvbackend := &db.Backend{
+		Client:     kvClient,
+		StoreType:  Backend,
+		Host:       Host,
+		Port:       Port,
+		Timeout:    KVSTORE_RETRY_TIMEOUT,
+		PathPrefix: pathPrefix}
+
+	return kvbackend
+}
+
+// NewPONResourceManager creates a new PON resource manager.
+func NewPONResourceManager(Technology string, DeviceType string, DeviceID string, Backend string, Host string, Port int) (*PONResourceManager, error) {
+	var PONMgr PONResourceManager
+	PONMgr.Technology = Technology
+	PONMgr.DeviceType = DeviceType
+	PONMgr.DeviceID = DeviceID
+	PONMgr.Backend = Backend
+	PONMgr.Host = Host
+	PONMgr.Port = Port
+	PONMgr.KVStore = SetKVClient(Technology, Backend, Host, Port, false)
+	if PONMgr.KVStore == nil {
+		log.Error("KV Client initilization failed")
+		return nil, errors.New("Failed to init KV client")
+	}
+	// init kv client to read from the config path
+	PONMgr.KVStoreForConfig = SetKVClient(Technology, Backend, Host, Port, true)
+	if PONMgr.KVStoreForConfig == nil {
+		log.Error("KV Config Client initilization failed")
+		return nil, errors.New("Failed to init KV Config client")
+	}
+	// Initialize techprofile for this technology
+	if PONMgr.TechProfileMgr, _ = tp.NewTechProfile(&PONMgr, Backend, Host, Port); PONMgr.TechProfileMgr == nil {
+		log.Error("Techprofile initialization failed")
+		return nil, errors.New("Failed to init tech profile")
+	}
+	PONMgr.PonResourceRanges = make(map[string]interface{})
+	PONMgr.SharedResourceMgrs = make(map[string]*PONResourceManager)
+	PONMgr.SharedIdxByType = make(map[string]string)
+	PONMgr.SharedIdxByType[ONU_ID] = ONU_ID_SHARED_IDX
+	PONMgr.SharedIdxByType[ALLOC_ID] = ALLOC_ID_SHARED_IDX
+	PONMgr.SharedIdxByType[GEMPORT_ID] = GEMPORT_ID_SHARED_IDX
+	PONMgr.SharedIdxByType[FLOW_ID] = FLOW_ID_SHARED_IDX
+	PONMgr.IntfIDs = make([]uint32, NUM_OF_PON_INTF)
+	PONMgr.OLTModel = DeviceType
+	return &PONMgr, nil
+}
+
+/*
+  Initialize PON resource ranges with config fetched from kv store.
+  return boolean: True if PON resource ranges initialized else false
+  Try to initialize the PON Resource Ranges from KV store based on the
+  OLT model key, if available
+*/
+
+func (PONRMgr *PONResourceManager) InitResourceRangesFromKVStore(ctx context.Context) bool {
+	//Initialize PON resource ranges with config fetched from kv store.
+	//:return boolean: True if PON resource ranges initialized else false
+	// Try to initialize the PON Resource Ranges from KV store based on the
+	// OLT model key, if available
+	if PONRMgr.OLTModel == "" {
+		log.Error("Failed to get OLT model")
+		return false
+	}
+	Path := fmt.Sprintf(PON_RESOURCE_RANGE_CONFIG_PATH, PONRMgr.OLTModel)
+	//get resource from kv store
+	Result, err := PONRMgr.KVStore.Get(ctx, Path)
+	if err != nil {
+		log.Debugf("Error in fetching resource %s from KV strore", Path)
+		return false
+	}
+	if Result == nil {
+		log.Debug("There may be no resources in the KV store in case of fresh bootup, return true")
+		return false
+	}
+	//update internal ranges from kv ranges. If there are missing
+	// values in the KV profile, continue to use the defaults
+	Value, err := ToByte(Result.Value)
+	if err != nil {
+		log.Error("Failed to convert kvpair to byte string")
+		return false
+	}
+	if err := json.Unmarshal(Value, &PONRMgr.PonResourceRanges); err != nil {
+		log.Error("Failed to Unmarshal json byte")
+		return false
+	}
+	log.Debug("Init resource ranges from kvstore success")
+	return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateRanges(StartIDx string, StartID uint32, EndIDx string, EndID uint32,
+	SharedIDx string, SharedPoolID uint32, RMgr *PONResourceManager) {
+	/*
+	   Update the ranges for all resource types in the internal maps
+	   param: resource type start index
+	   param: start ID
+	   param: resource type end index
+	   param: end ID
+	   param: resource type shared index
+	   param: shared pool id
+	   param: global resource manager
+	*/
+	log.Debugf("update ranges for %s, %d", StartIDx, StartID)
+
+	if StartID != 0 {
+		if (PONRMgr.PonResourceRanges[StartIDx] == nil) || (PONRMgr.PonResourceRanges[StartIDx].(uint32) < StartID) {
+			PONRMgr.PonResourceRanges[StartIDx] = StartID
+		}
+	}
+	if EndID != 0 {
+		if (PONRMgr.PonResourceRanges[EndIDx] == nil) || (PONRMgr.PonResourceRanges[EndIDx].(uint32) > EndID) {
+			PONRMgr.PonResourceRanges[EndIDx] = EndID
+		}
+	}
+	//if SharedPoolID != 0 {
+	PONRMgr.PonResourceRanges[SharedIDx] = SharedPoolID
+	//}
+	if RMgr != nil {
+		PONRMgr.SharedResourceMgrs[SharedIDx] = RMgr
+	}
+}
+
+func (PONRMgr *PONResourceManager) InitDefaultPONResourceRanges(ONUIDStart uint32,
+	ONUIDEnd uint32,
+	ONUIDSharedPoolID uint32,
+	AllocIDStart uint32,
+	AllocIDEnd uint32,
+	AllocIDSharedPoolID uint32,
+	GEMPortIDStart uint32,
+	GEMPortIDEnd uint32,
+	GEMPortIDSharedPoolID uint32,
+	FlowIDStart uint32,
+	FlowIDEnd uint32,
+	FlowIDSharedPoolID uint32,
+	UNIIDStart uint32,
+	UNIIDEnd uint32,
+	NoOfPONPorts uint32,
+	IntfIDs []uint32) bool {
+
+	/*Initialize default PON resource ranges
+
+	  :param onu_id_start_idx: onu id start index
+	  :param onu_id_end_idx: onu id end index
+	  :param onu_id_shared_pool_id: pool idx for id shared by all intfs or None for no sharing
+	  :param alloc_id_start_idx: alloc id start index
+	  :param alloc_id_end_idx: alloc id end index
+	  :param alloc_id_shared_pool_id: pool idx for alloc id shared by all intfs or None for no sharing
+	  :param gemport_id_start_idx: gemport id start index
+	  :param gemport_id_end_idx: gemport id end index
+	  :param gemport_id_shared_pool_id: pool idx for gemport id shared by all intfs or None for no sharing
+	  :param flow_id_start_idx: flow id start index
+	  :param flow_id_end_idx: flow id end index
+	  :param flow_id_shared_pool_id: pool idx for flow id shared by all intfs or None for no sharing
+	  :param num_of_pon_ports: number of PON ports
+	  :param intf_ids: interfaces serviced by this manager
+	*/
+	PONRMgr.UpdateRanges(ONU_ID_START_IDX, ONUIDStart, ONU_ID_END_IDX, ONUIDEnd, ONU_ID_SHARED_IDX, ONUIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(ALLOC_ID_START_IDX, AllocIDStart, ALLOC_ID_END_IDX, AllocIDEnd, ALLOC_ID_SHARED_IDX, AllocIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
+	log.Debug("Initialize default range values")
+	var i uint32
+	if IntfIDs == nil {
+		for i = 0; i < NoOfPONPorts; i++ {
+			PONRMgr.IntfIDs = append(PONRMgr.IntfIDs, i)
+		}
+	} else {
+		PONRMgr.IntfIDs = IntfIDs
+	}
+	return true
+}
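A hedged usage sketch (not part of the vendored file): the typical sequence is to create the manager, seed the resource ranges (from the KV store when available, otherwise with defaults), and then initialize the per-interface pools. The host, port and range values below are assumptions chosen for illustration only.

package example

import (
	"context"

	ponrmgr "github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager"
)

func setupResourceManager(ctx context.Context) (*ponrmgr.PONResourceManager, error) {
	mgr, err := ponrmgr.NewPONResourceManager("xgspon", "openolt", "device-1", "etcd", "127.0.0.1", 2379)
	if err != nil {
		return nil, err
	}
	// Ranges could instead come from the KV store via InitResourceRangesFromKVStore(ctx).
	mgr.InitDefaultPONResourceRanges(
		1, 128, 0, // ONU ID start/end/shared pool
		1024, 2816, 0, // alloc ID start/end/shared pool
		1024, 2816, 0, // GEM port ID start/end/shared pool
		1, 64, 0, // flow ID start/end/shared pool
		0, 3, // UNI ID start/end
		16, nil) // number of PON ports; interface IDs derived when nil
	if err := mgr.InitDeviceResourcePool(ctx); err != nil {
		return nil, err
	}
	return mgr, nil
}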
+
+func (PONRMgr *PONResourceManager) InitDeviceResourcePool(ctx context.Context) error {
+
+	//Initialize resource pool for all PON ports.
+
+	log.Debug("Init resource ranges")
+
+	var err error
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if err = PONRMgr.InitResourceIDPool(ctx, Intf, ONU_ID,
+			PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
+			PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
+			log.Error("Failed to init ONU ID resource pool")
+			return err
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if err = PONRMgr.InitResourceIDPool(ctx, Intf, ALLOC_ID,
+			PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
+			PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
+			log.Error("Failed to init ALLOC ID resource pool ")
+			return err
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if err = PONRMgr.InitResourceIDPool(ctx, Intf, GEMPORT_ID,
+			PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
+			PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
+			log.Error("Failed to init GEMPORT ID resource pool")
+			return err
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if err = PONRMgr.InitResourceIDPool(ctx, Intf, FLOW_ID,
+			PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
+			PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
+			log.Error("Failed to init FLOW ID resource pool")
+			return err
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) ClearDeviceResourcePool(ctx context.Context) error {
+
+	//Clear resource pool for all PON ports.
+
+	log.Debug("Clear resource ranges")
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, ONU_ID); !status {
+			log.Error("Failed to clear ONU ID resource pool")
+			return errors.New("Failed to clear ONU ID resource pool")
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, ALLOC_ID); !status {
+			log.Error("Failed to clear ALLOC ID resource pool ")
+			return errors.New("Failed to clear ALLOC ID resource pool")
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, GEMPORT_ID); !status {
+			log.Error("Failed to clear GEMPORT ID resource pool")
+			return errors.New("Failed to clear GEMPORT ID resource pool")
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+
+	for _, Intf := range PONRMgr.IntfIDs {
+		SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
+		if SharedPoolID != 0 {
+			Intf = SharedPoolID
+		}
+		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, FLOW_ID); !status {
+			log.Error("Failed to clear FLOW ID resource pool")
+			return errors.New("Failed to clear FLOW ID resource pool")
+		}
+		if SharedPoolID != 0 {
+			break
+		}
+	}
+	return nil
+}
+
+func (PONRMgr *PONResourceManager) InitResourceIDPool(ctx context.Context, Intf uint32, ResourceType string, StartID uint32, EndID uint32) error {
+
+	/*Initialize Resource ID pool for a given Resource Type on a given PON Port
+
+	  :param pon_intf_id: OLT PON interface id
+	  :param resource_type: String to identify type of resource
+	  :param start_idx: start index for onu id pool
+	  :param end_idx: end index for onu id pool
+	  :return boolean: True if resource id pool initialized else false
+	*/
+
+	// delegate to the master instance if sharing enabled across instances
+	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+		return SharedResourceMgr.InitResourceIDPool(ctx, Intf, ResourceType, StartID, EndID)
+	}
+
+	Path := PONRMgr.GetPath(Intf, ResourceType)
+	if Path == "" {
+		log.Errorf("Failed to get path for resource type %s", ResourceType)
+		return fmt.Errorf("Failed to get path for resource type %s", ResourceType)
+	}
+
+	//In case of adapter reboot and reconciliation, check whether the resource is
+	//already present in the kv store; if not, update the kv store
+	Res, err := PONRMgr.GetResource(ctx, Path)
+	if (err == nil) && (Res != nil) {
+		log.Debugf("Resource %s already present in store ", Path)
+		return nil
+	} else {
+		var excluded []uint32
+		if ResourceType == GEMPORT_ID {
+			//get gem port ids defined in the KV store, if any, and exclude them from the gem port id pool
+			if reservedGemPortIds, defined := PONRMgr.getReservedGemPortIdsFromKVStore(ctx); defined {
+				excluded = reservedGemPortIds
+				log.Debugw("Excluding some ports from GEM port id pool", log.Fields{"excluded gem ports": excluded})
+			}
+		}
+		FormatResult, err := PONRMgr.FormatResource(Intf, StartID, EndID, excluded)
+		if err != nil {
+			log.Errorf("Failed to format resource")
+			return err
+		}
+		// Add resource as json in kv store.
+		err = PONRMgr.KVStore.Put(ctx, Path, FormatResult)
+		if err == nil {
+			log.Debug("Successfuly posted to kv store")
+			return err
+		}
+	}
+
+	log.Debug("Error initializing pool")
+
+	return err
+}
+
+func (PONRMgr *PONResourceManager) getReservedGemPortIdsFromKVStore(ctx context.Context) ([]uint32, bool) {
+	var reservedGemPortIds []uint32
+	// read reserved gem ports from the config path
+	KvPair, err := PONRMgr.KVStoreForConfig.Get(ctx, RESERVED_GEMPORT_IDS_PATH)
+	if err != nil {
+		log.Errorw("Unable to get reserved GEM port ids from the kv store", log.Fields{"err": err})
+		return reservedGemPortIds, false
+	}
+	if KvPair == nil || KvPair.Value == nil {
+		//no reserved gem port defined in the store
+		return reservedGemPortIds, false
+	}
+	Val, err := kvstore.ToByte(KvPair.Value)
+	if err != nil {
+		log.Errorw("Failed to convert reserved gem port ids into byte array", log.Fields{"err": err})
+		return reservedGemPortIds, false
+	}
+	if err = json.Unmarshal(Val, &reservedGemPortIds); err != nil {
+		log.Errorw("Failed to unmarshal reservedGemPortIds", log.Fields{"err": err})
+		return reservedGemPortIds, false
+	}
+	return reservedGemPortIds, true
+}
+
+func (PONRMgr *PONResourceManager) FormatResource(IntfID uint32, StartIDx uint32, EndIDx uint32,
+	Excluded []uint32) ([]byte, error) {
+	/*
+	   Format resource as json.
+	   :param pon_intf_id: OLT PON interface id
+	   :param start_idx: start index for id pool
+	   :param end_idx: end index for id pool
+	   :param excluded: ID values to be excluded from the pool
+	   :return dictionary: resource formatted as map
+	*/
+	// Format resource as json to be stored in backend store
+	Resource := make(map[string]interface{})
+	Resource[PON_INTF_ID] = IntfID
+	Resource[START_IDX] = StartIDx
+	Resource[END_IDX] = EndIDx
+	/*
+	   Resource pool stored in backend store as binary string.
+	   Resource allocation is tracked by setting bits in the byte array;
+	   the index of a set bit is the allocated resource number.
+	*/
+	var TSData *bitmap.Threadsafe
+	if TSData = bitmap.NewTS(int(EndIDx)); TSData == nil {
+		log.Error("Failed to create a bitmap")
+		return nil, errors.New("Failed to create bitmap")
+	}
+	for _, excludedID := range Excluded {
+		if excludedID < StartIDx || excludedID > EndIDx {
+			log.Warnf("Cannot reserve %d. It must be in the range of [%d, %d]", excludedID,
+				StartIDx, EndIDx)
+			continue
+		}
+		PONRMgr.reserveID(TSData, StartIDx, excludedID)
+	}
+	Resource[POOL] = TSData.Data(false) // pass false so the TSData library returns the underlying data without making a copy
+
+	Value, err := json.Marshal(Resource)
+	if err != nil {
+		log.Errorf("Failed to marshall resource")
+		return nil, err
+	}
+	return Value, err
+}
+func (PONRMgr *PONResourceManager) GetResource(ctx context.Context, Path string) (map[string]interface{}, error) {
+	/*
+	   Get resource from kv store.
+
+	   :param path: path to get resource
+	   :return: resource if resource present in kv store else None
+	*/
+	//get resource from kv store
+
+	var Value []byte
+	Result := make(map[string]interface{})
+	var Str string
+
+	Resource, err := PONRMgr.KVStore.Get(ctx, Path)
+	if (err != nil) || (Resource == nil) {
+		log.Debugf("Resource  unavailable at %s", Path)
+		return nil, err
+	}
+
+	Value, err = ToByte(Resource.Value)
+	if err != nil {
+		return nil, err
+	}
+
+	// decode resource fetched from backend store to dictionary
+	err = json.Unmarshal(Value, &Result)
+	if err != nil {
+		log.Error("Failed to decode resource")
+		return Result, err
+	}
+	/*
+	   The resource pool is stored in the backend store as a binary string; to
+	   access the pool for generating/releasing IDs it needs to be converted
+	   to a BitArray.
+	*/
+	Str, err = ToString(Result[POOL])
+	if err != nil {
+		log.Error("Failed to conver to kv pair to string")
+		return Result, err
+	}
+	Decode64, _ := base64.StdEncoding.DecodeString(Str)
+	Result[POOL], err = ToByte(Decode64)
+	if err != nil {
+		log.Error("Failed to convert resource pool to byte")
+		return Result, err
+	}
+
+	return Result, err
+}
+
+func (PONRMgr *PONResourceManager) GetPath(IntfID uint32, ResourceType string) string {
+	/*
+	   Get path for given resource type.
+	   :param pon_intf_id: OLT PON interface id
+	   :param resource_type: String to identify type of resource
+	   :return: path for given resource type
+	*/
+
+	/*
+	   Get the shared pool for the given resource type.
+	   all the resource ranges and the shared resource maps are initialized during the init.
+	*/
+	SharedPoolID := PONRMgr.PonResourceRanges[PONRMgr.SharedIdxByType[ResourceType]].(uint32)
+	if SharedPoolID != 0 {
+		IntfID = SharedPoolID
+	}
+	var Path string
+	if ResourceType == ONU_ID {
+		Path = fmt.Sprintf(ONU_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+	} else if ResourceType == ALLOC_ID {
+		Path = fmt.Sprintf(ALLOC_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+	} else if ResourceType == GEMPORT_ID {
+		Path = fmt.Sprintf(GEMPORT_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+	} else if ResourceType == FLOW_ID {
+		Path = fmt.Sprintf(FLOW_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
+	} else {
+		log.Error("Invalid resource pool identifier")
+	}
+	return Path
+}
+
+func (PONRMgr *PONResourceManager) GetResourceID(ctx context.Context, IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error) {
+	/*
+	   Create alloc/gemport/onu/flow id for given OLT PON interface.
+	   :param pon_intf_id: OLT PON interface id
+	   :param resource_type: String to identify type of resource
+	   :param num_of_id: required number of ids
+	   :return list/uint32/None: list, uint32 or None if resource type is
+	    alloc_id/gemport_id, onu_id or invalid type respectively
+	*/
+	if NumIDs < 1 {
+		log.Error("Invalid number of resources requested")
+		return nil, fmt.Errorf("Invalid number of resources requested %d", NumIDs)
+	}
+	// delegate to the master instance if sharing enabled across instances
+
+	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+		return SharedResourceMgr.GetResourceID(ctx, IntfID, ResourceType, NumIDs)
+	}
+	log.Debugf("Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
+
+	Path := PONRMgr.GetPath(IntfID, ResourceType)
+	if Path == "" {
+		log.Errorf("Failed to get path for resource type %s", ResourceType)
+		return nil, fmt.Errorf("Failed to get path for resource type %s", ResourceType)
+	}
+	log.Debugf("Get resource for type %s on path %s", ResourceType, Path)
+	var Result []uint32
+	var NextID uint32
+	Resource, err := PONRMgr.GetResource(ctx, Path)
+	if (err == nil) && (ResourceType == ONU_ID) || (ResourceType == FLOW_ID) {
+		if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+			log.Error("Failed to Generate ID")
+			return Result, err
+		}
+		Result = append(Result, NextID)
+	} else if (err == nil) && ((ResourceType == GEMPORT_ID) || (ResourceType == ALLOC_ID)) {
+		if NumIDs == 1 {
+			if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+				log.Error("Failed to Generate ID")
+				return Result, err
+			}
+			Result = append(Result, NextID)
+		} else {
+			for NumIDs > 0 {
+				if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+					log.Error("Failed to Generate ID")
+					return Result, err
+				}
+				Result = append(Result, NextID)
+				NumIDs--
+			}
+		}
+	} else {
+		log.Error("get resource failed")
+		return Result, err
+	}
+
+	//Update resource in kv store
+	if PONRMgr.UpdateResource(ctx, Path, Resource) != nil {
+		log.Errorf("Failed to update resource %s", Path)
+		return nil, fmt.Errorf("Failed to update resource %s", Path)
+	}
+	return Result, nil
+}
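Continuing the sketch above (illustrative only; interface ID 0 and the error handling are assumptions, and context, fmt and the ponrmgr alias are assumed imported): a request for a single FLOW_ID returns a one-element slice, and the ID can later be handed back with FreeResourceID.

func allocateAndFreeFlowID(ctx context.Context, mgr *ponrmgr.PONResourceManager) error {
	ids, err := mgr.GetResourceID(ctx, 0, ponrmgr.FLOW_ID, 1)
	if err != nil {
		return err
	}
	flowID := ids[0]
	// ... use flowID to program a flow on the OLT ...
	if !mgr.FreeResourceID(ctx, 0, ponrmgr.FLOW_ID, []uint32{flowID}) {
		return fmt.Errorf("failed to free flow id %d", flowID)
	}
	return nil
}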
+
+func checkValidResourceType(ResourceType string) bool {
+	KnownResourceTypes := []string{ONU_ID, ALLOC_ID, GEMPORT_ID, FLOW_ID}
+
+	for _, v := range KnownResourceTypes {
+		if v == ResourceType {
+			return true
+		}
+	}
+	return false
+}
+
+func (PONRMgr *PONResourceManager) FreeResourceID(ctx context.Context, IntfID uint32, ResourceType string, ReleaseContent []uint32) bool {
+	/*
+	   Release alloc/gemport/onu/flow id for given OLT PON interface.
+	   :param pon_intf_id: OLT PON interface id
+	   :param resource_type: String to identify type of resource
+	   :param release_content: required number of ids
+	   :return boolean: True if all IDs in given release_content release else False
+	*/
+	if !checkValidResourceType(ResourceType) {
+		log.Error("Invalid resource type")
+		return false
+	}
+	if ReleaseContent == nil {
+		log.Debug("Nothing to release")
+		return true
+	}
+	// delegate to the master instance if sharing enabled across instances
+	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+		return SharedResourceMgr.FreeResourceID(ctx, IntfID, ResourceType, ReleaseContent)
+	}
+	Path := PONRMgr.GetPath(IntfID, ResourceType)
+	if Path == "" {
+		log.Error("Failed to get path")
+		return false
+	}
+	Resource, err := PONRMgr.GetResource(ctx, Path)
+	if err != nil {
+		log.Error("Failed to get resource")
+		return false
+	}
+	for _, Val := range ReleaseContent {
+		PONRMgr.ReleaseID(Resource, Val)
+	}
+	if PONRMgr.UpdateResource(ctx, Path, Resource) != nil {
+		log.Errorf("Free resource for %s failed", Path)
+		return false
+	}
+	return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateResource(ctx context.Context, Path string, Resource map[string]interface{}) error {
+	/*
+	   Update resource in resource kv store.
+	   :param path: path to update resource
+	   :param resource: resource need to be updated
+	   :return boolean: True if resource updated in kv store else False
+	*/
+	// TODO resource[POOL] = resource[POOL].bin
+	Value, err := json.Marshal(Resource)
+	if err != nil {
+		log.Error("failed to Marshal")
+		return err
+	}
+	err = PONRMgr.KVStore.Put(ctx, Path, Value)
+	if err != nil {
+		log.Error("failed to put data to kv store %s", Path)
+		return err
+	}
+	return nil
+}
+
+func (PONRMgr *PONResourceManager) ClearResourceIDPool(ctx context.Context, contIntfID uint32, ResourceType string) bool {
+	/*
+	   Clear Resource Pool for a given Resource Type on a given PON Port.
+	   :return boolean: True if removed else False
+	*/
+
+	// delegate to the master instance if sharing enabled across instances
+	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
+	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
+		return SharedResourceMgr.ClearResourceIDPool(ctx, contIntfID, ResourceType)
+	}
+	Path := PONRMgr.GetPath(contIntfID, ResourceType)
+	if Path == "" {
+		log.Error("Failed to get path")
+		return false
+	}
+
+	if err := PONRMgr.KVStore.Delete(ctx, Path); err != nil {
+		log.Errorf("Failed to delete resource %s", Path)
+		return false
+	}
+	log.Debugf("Cleared resource %s", Path)
+	return true
+}
+
+func (PONRMgr PONResourceManager) InitResourceMap(ctx context.Context, PONIntfONUID string) {
+	/*
+	   Initialize resource map
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	*/
+	// initialize pon_intf_onu_id tuple to alloc_ids map
+	AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	var AllocIDs []byte
+	Result := PONRMgr.KVStore.Put(ctx, AllocIDPath, AllocIDs)
+	if Result != nil {
+		log.Error("Failed to update the KV store")
+		return
+	}
+	// initialize pon_intf_onu_id tuple to gemport_ids map
+	GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	var GEMPortIDs []byte
+	Result = PONRMgr.KVStore.Put(ctx, GEMPortIDPath, GEMPortIDs)
+	if Result != nil {
+		log.Error("Failed to update the KV store")
+		return
+	}
+}
+
+func (PONRMgr PONResourceManager) RemoveResourceMap(ctx context.Context, PONIntfONUID string) bool {
+	/*
+	   Remove resource map
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	*/
+	// remove pon_intf_onu_id tuple to alloc_ids map
+	var err error
+	AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	if err = PONRMgr.KVStore.Delete(ctx, AllocIDPath); err != nil {
+		log.Errorf("Failed to remove resource %s", AllocIDPath)
+		return false
+	}
+	// remove pon_intf_onu_id tuple to gemport_ids map
+	GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	err = PONRMgr.KVStore.Delete(ctx, GEMPortIDPath)
+	if err != nil {
+		log.Errorf("Failed to remove resource %s", GEMPortIDPath)
+		return false
+	}
+
+	FlowIDPath := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
+	if FlowIDs, err := PONRMgr.KVStore.List(ctx, FlowIDPath); err == nil {
+		for _, Flow := range FlowIDs {
+			FlowIDInfoPath := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, PONIntfONUID, Flow.Value)
+			if err = PONRMgr.KVStore.Delete(ctx, FlowIDInfoPath); err != nil {
+				log.Errorf("Failed to remove resource %s", FlowIDInfoPath)
+				return false
+			}
+		}
+	}
+
+	if err = PONRMgr.KVStore.Delete(ctx, FlowIDPath); err != nil {
+		log.Errorf("Failed to remove resource %s", FlowIDPath)
+		return false
+	}
+
+	return true
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentAllocIDForOnu(ctx context.Context, IntfONUID string) []uint32 {
+	/*
+	   Get currently configured alloc ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :return list: List of alloc_ids if available, else None
+	*/
+	Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+
+	var Data []uint32
+	Value, err := PONRMgr.KVStore.Get(ctx, Path)
+	if err == nil {
+		if Value != nil {
+			Val, err := ToByte(Value.Value)
+			if err != nil {
+				log.Errorw("Failed to convert into byte array", log.Fields{"error": err})
+				return Data
+			}
+			if err = json.Unmarshal(Val, &Data); err != nil {
+				log.Error("Failed to unmarshal", log.Fields{"error": err})
+				return Data
+			}
+		}
+	}
+	return Data
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentGEMPortIDsForOnu(ctx context.Context, IntfONUID string) []uint32 {
+	/*
+	   Get currently configured gemport ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :return list: List of gemport IDs if available, else None
+	*/
+
+	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+	log.Debugf("Getting current gemports for %s", Path)
+	var Data []uint32
+	Value, err := PONRMgr.KVStore.Get(ctx, Path)
+	if err == nil {
+		if Value != nil {
+			Val, _ := ToByte(Value.Value)
+			if err = json.Unmarshal(Val, &Data); err != nil {
+				log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+				return Data
+			}
+		}
+	} else {
+		log.Errorf("Failed to get data from kvstore for %s", Path)
+	}
+	return Data
+}
+
+func (PONRMgr *PONResourceManager) GetCurrentFlowIDsForOnu(ctx context.Context, IntfONUID string) []uint32 {
+	/*
+	   Get currently configured flow ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :return list: List of Flow IDs if available, else None
+	*/
+
+	Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+
+	var Data []uint32
+	Value, err := PONRMgr.KVStore.Get(ctx, Path)
+	if err == nil {
+		if Value != nil {
+			Val, _ := ToByte(Value.Value)
+			if err = json.Unmarshal(Val, &Data); err != nil {
+				log.Error("Failed to unmarshal")
+				return Data
+			}
+		}
+	}
+	return Data
+}
+
+func (PONRMgr *PONResourceManager) GetFlowIDInfo(ctx context.Context, IntfONUID string, FlowID uint32, Data interface{}) error {
+	/*
+	   Get flow details configured for the ONU.
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param flow_id: Flow Id reference
+	   :param Data: Result
+	   :return error: nil if no error in getting from KV store
+	*/
+
+	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+
+	Value, err := PONRMgr.KVStore.Get(ctx, Path)
+	if err == nil {
+		if Value != nil {
+			Val, err := ToByte(Value.Value)
+			if err != nil {
+				log.Errorw("Failed to convert flowinfo into byte array", log.Fields{"error": err})
+				return err
+			}
+			if err = json.Unmarshal(Val, Data); err != nil {
+				log.Errorw("Failed to unmarshal", log.Fields{"error": err})
+				return err
+			}
+		}
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) RemoveFlowIDInfo(ctx context.Context, IntfONUID string, FlowID uint32) bool {
+	/*
+	   Get flow_id details configured for the ONU.
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param flow_id: Flow Id reference
+	*/
+	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+
+	if err := PONRMgr.KVStore.Delete(ctx, Path); err != nil {
+		log.Errorf("Falied to remove resource %s", Path)
+		return false
+	}
+	return true
+}
+
+func (PONRMgr *PONResourceManager) UpdateAllocIdsForOnu(ctx context.Context, IntfONUID string, AllocIDs []uint32) error {
+	/*
+	   Update currently configured alloc ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param alloc_ids: list of alloc ids
+	*/
+	var Value []byte
+	var err error
+	Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+	Value, err = json.Marshal(AllocIDs)
+	if err != nil {
+		log.Error("failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
+		log.Errorf("Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) UpdateGEMPortIDsForOnu(ctx context.Context, IntfONUID string, GEMPortIDs []uint32) error {
+	/*
+	   Update currently configured gemport ids for given pon_intf_onu_id
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param gemport_ids: list of gem port ids
+	*/
+
+	var Value []byte
+	var err error
+	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+	log.Debugf("Updating gemport ids for %s", Path)
+	Value, err = json.Marshal(GEMPortIDs)
+	if err != nil {
+		log.Error("failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
+		log.Errorf("Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func checkForFlowIDInList(FlowIDList []uint32, FlowID uint32) (bool, uint32) {
+	/*
+	   Check for a flow id in a given list of flow IDs.
+	   :param FLowIDList: List of Flow IDs
+	   :param FlowID: Flow ID to check in the list
+	   :return: true and the index if present, false otherwise.
+	*/
+
+	for idx := range FlowIDList {
+		if FlowID == FlowIDList[idx] {
+			return true, uint32(idx)
+		}
+	}
+	return false, 0
+}
+
+func (PONRMgr *PONResourceManager) UpdateFlowIDForOnu(ctx context.Context, IntfONUID string, FlowID uint32, Add bool) error {
+	/*
+	   Update the flow_id list of the ONU (add or remove flow_id from the list)
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param flow_id: flow ID
+	   :param add: Boolean flag to indicate whether the flow_id should be
+	               added or removed from the list. Defaults to adding the flow.
+	*/
+	var Value []byte
+	var err error
+	var RetVal bool
+	var IDx uint32
+	Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
+	FlowIDs := PONRMgr.GetCurrentFlowIDsForOnu(ctx, IntfONUID)
+
+	if Add {
+		if RetVal, _ = checkForFlowIDInList(FlowIDs, FlowID); RetVal {
+			return nil
+		}
+		FlowIDs = append(FlowIDs, FlowID)
+	} else {
+		if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); !RetVal {
+			return nil
+		}
+		// delete the index and shift
+		FlowIDs = append(FlowIDs[:IDx], FlowIDs[IDx+1:]...)
+	}
+	Value, err = json.Marshal(FlowIDs)
+	if err != nil {
+		log.Error("Failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
+		log.Errorf("Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) UpdateFlowIDInfoForOnu(ctx context.Context, IntfONUID string, FlowID uint32, FlowData interface{}) error {
+	/*
+	   Update any metadata associated with the flow_id. The flow_data could be json
+	   or any other data structure; the resource manager doesn't care.
+	   :param pon_intf_onu_id: reference of PON interface id and onu id
+	   :param flow_id: Flow ID
+	   :param flow_data: Flow data blob
+	*/
+	var Value []byte
+	var err error
+	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
+	Value, err = json.Marshal(FlowData)
+	if err != nil {
+		log.Error("failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
+		log.Errorf("Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) GenerateNextID(Resource map[string]interface{}) (uint32, error) {
+	/*
+	   Generate unique id having OFFSET as start
+	   :param resource: resource used to generate ID
+	   :return uint32: generated id
+	*/
+	ByteArray, err := ToByte(Resource[POOL])
+	if err != nil {
+		log.Error("Failed to convert resource to byte array")
+		return 0, err
+	}
+	Data := bitmap.TSFromData(ByteArray, false)
+	if Data == nil {
+		log.Error("Failed to get data from byte array")
+		return 0, errors.New("Failed to get data from byte array")
+	}
+
+	Len := Data.Len()
+	var Idx int
+	for Idx = 0; Idx < Len; Idx++ {
+		if !Data.Get(Idx) {
+			break
+		}
+	}
+	Data.Set(Idx, true)
+	res := uint32(Resource[START_IDX].(float64))
+	Resource[POOL] = Data.Data(false)
+	log.Debugf("Generated ID for %d", (uint32(Idx) + res))
+	return (uint32(Idx) + res), err
+}
+
+func (PONRMgr *PONResourceManager) ReleaseID(Resource map[string]interface{}, Id uint32) bool {
+	/*
+	   Release unique id having OFFSET as start index.
+	   :param resource: resource used to release ID
+	   :param unique_id: id need to be released
+	*/
+	ByteArray, err := ToByte(Resource[POOL])
+	if err != nil {
+		log.Error("Failed to convert resource to byte array")
+		return false
+	}
+	Data := bitmap.TSFromData(ByteArray, false)
+	if Data == nil {
+		log.Error("Failed to get resource pool")
+		return false
+	}
+	Idx := Id - uint32(Resource[START_IDX].(float64))
+	Data.Set(int(Idx), false)
+	Resource[POOL] = Data.Data(false)
+
+	return true
+}
+
+/* Reserves a unique id in the specified resource pool.
+:param Resource: resource used to reserve ID
+:param Id: ID to be reserved
+*/
+func (PONRMgr *PONResourceManager) reserveID(TSData *bitmap.Threadsafe, StartIndex uint32, Id uint32) bool {
+	Data := bitmap.TSFromData(TSData.Data(false), false)
+	if Data == nil {
+		log.Error("Failed to get resource pool")
+		return false
+	}
+	Idx := Id - StartIndex
+	Data.Set(int(Idx), true)
+	return true
+}
+
+func (PONRMgr *PONResourceManager) GetTechnology() string {
+	return PONRMgr.Technology
+}
+
+func (PONRMgr *PONResourceManager) GetResourceTypeAllocID() string {
+	return ALLOC_ID
+}
+
+func (PONRMgr *PONResourceManager) GetResourceTypeGemPortID() string {
+	return GEMPORT_ID
+}
+
+// ToByte converts an interface value to a []byte.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToByte(value interface{}) ([]byte, error) {
+	switch t := value.(type) {
+	case []byte:
+		return value.([]byte), nil
+	case string:
+		return []byte(value.(string)), nil
+	default:
+		return nil, fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+// ToString converts an interface value to a string.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToString(value interface{}) (string, error) {
+	switch t := value.(type) {
+	case []byte:
+		return string(value.([]byte)), nil
+	case string:
+		return value.(string), nil
+	default:
+		return "", fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+func (PONRMgr *PONResourceManager) AddOnuGemInfo(ctx context.Context, intfID uint32, onuGemData interface{}) error {
+	/*
+	   Update onugem info map,
+	   :param pon_intf_id: reference of PON interface id
+	   :param onuGemData: onugem info map
+	*/
+	var Value []byte
+	var err error
+	Path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, intfID)
+	Value, err = json.Marshal(onuGemData)
+	if err != nil {
+		log.Error("failed to Marshal")
+		return err
+	}
+
+	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
+		log.Errorf("Failed to update resource %s", Path)
+		return err
+	}
+	return err
+}
+
+func (PONRMgr *PONResourceManager) GetOnuGemInfo(ctx context.Context, IntfId uint32, onuGemInfo interface{}) error {
+	/*
+	  Get onugeminfo map from kvstore
+	  :param intfid: reference pon intf id
+	  :param onuGemInfo: onugem info to return from the kv store.
+	*/
+	var Val []byte
+
+	path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, IntfId)
+	value, err := PONRMgr.KVStore.Get(ctx, path)
+	if err != nil {
+		log.Errorw("Failed to get from kv store", log.Fields{"path": path})
+		return err
+	} else if value == nil {
+		log.Debug("No onuinfo for path", log.Fields{"path": path})
+		return nil // returning nil as this could happen if there are no onus for the interface yet
+	}
+	if Val, err = kvstore.ToByte(value.Value); err != nil {
+		log.Error("Failed to convert to byte array")
+		return err
+	}
+
+	if err = json.Unmarshal(Val, &onuGemInfo); err != nil {
+		log.Error("Failed to unmarshall")
+		return err
+	}
+	log.Debugw("found onuinfo from path", log.Fields{"path": path, "onuinfo": onuGemInfo})
+	return err
+}
+
+func (PONRMgr *PONResourceManager) DelOnuGemInfoForIntf(ctx context.Context, intfId uint32) error {
+	/*
+	   delete onugem info for an interface from kvstore
+	   :param intfid: reference pon intf id
+	*/
+
+	path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, intfId)
+	if err := PONRMgr.KVStore.Delete(ctx, path); err != nil {
+		log.Errorf("Falied to remove resource %s", path)
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/4QueueHybridProfileMap1.json b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/4QueueHybridProfileMap1.json
new file mode 100644
index 0000000..d11f8e4
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/4QueueHybridProfileMap1.json
@@ -0,0 +1,141 @@
+ {
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "AdditionalBW_Auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "Hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "AdditionalBW_Auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "Hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/README.md b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/README.md
new file mode 100644
index 0000000..03a396d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/README.md
@@ -0,0 +1,336 @@
+Technology Profile Management
+Overview
+Technology profiles that are utilized by VOLTHA are stored in a prescribed structure in VOLTHA's key/value store, which is currently etcd. The key structure used to access technology profiles is /voltha/technology_profiles/<TECHNOLOGY>/<TID>, where TID is the numeric ID of the technology profile and TECHNOLOGY specifies the technology being utilized by the adapter, e.g. xgspon. While the TECHNOLOGY key is a directory, the TID key should be set to the JSON data that represents the technology profile values.
+
+NOTE: The content of a technology profile represents a contract between the technology profile definition and all adapters that consume that technology profile. The structure and content of the profiles are outside the scope of Technology Profile Management. Technology profile management only specifies the key/value structure in which profiles are stored.
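+
+For illustration, with the defaults used by this library the resulting keys look roughly as follows (the technology xgspon, profile ID 64 and the pon/onu/uni numbers are example values only):
+
+# Technology profile template
+service/voltha/technology_profiles/xgspon/64
+
+# Technology profile instance created for a specific UNI port
+service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}/uni-{0}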
+
+Example JSON :
+
+{
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
+
+Creating Technology Profiles
+A technology profile is a simple JSON object. This JSON object can be created using a variety of tools such as Vim, Emacs, or various IDEs. jq can be a useful tool for validating a JSON object. Once a file is created with the JSON object, it can be stored in the VOLTHA key/value store using the standard etcd command line tool etcdctl or using an HTTP PUT operation with curl.
+
+Assuming you are in a standard VOLTHA deployment within a Kubernetes cluster, you can access the etcd key/value store using kubectl via the pods named etcd-cluster-0000, etcd-cluster-0001, or etcd-cluster-0002. For the examples in this document etcd-cluster-0000 will be used, but it should not matter which one is used.
+
+etcd version 3 is used by the techprofile module. Export this variable before running the etcdctl operations below: export ETCDCTL_API=3
+
+Assuming the technology profile template is stored in a local file 4QueueHybridProfileMap1.json, the following commands could be used to store or update the template in the proper location in the etcd key/value store:
+
+# Store a Technology template using etcdctl
+jq -c . 4QueueHybridProfileMap1.json | kubectl exec -i etcd-cluster-0000 -- etcdctl set service/voltha/technology_profiles/xgspon/64
+
+jq -c . 4QueueHybridProfileMap1.json |  etcdctl --endpoints=<ETCDIP>:2379 put service/voltha/technology_profiles/xgspon/64
+
+
+# Store a Technology template using curl
+curl -sSL -XPUT http://10.233.53.161:2379/v2/keys/service/voltha/technology_profiles/xgspon/64 -d value="$(jq -c . 4QueueHybridProfileMap1.json)"
+In the examples above, the command jq is used. This command can be installed using standard package management tools on most Linux systems. In the examples the "-c" option is used to compress the JSON. Using this tool is not necessary, and if you choose not to use the tool, you can replace "jq -c ." in the above examples with the "cat" command. More on jq can be found at https://stedolan.github.io/jq/.
+
+Listing Technology Profiles for a given Technology
+While both curl and etcdctl (via kubectl) can be used to list or view the available Technology profiles, etcdctl is easier, and thus will be used in the examples. For listing Technology profiles, etcdctl get is used on the profile key; it can be used in conjunction with the --prefix option to recursively list profiles.
+
+
+# List Tech profiles
+etcdctl --endpoints=<EtcdIPAddress>:2379 get --prefix service/voltha/technology_profiles/xgspon/64
+
+
+A specified Technology profile can be viewed with the etcdctl get command. (Again, jq is used for presentation purposes, and is not required.)
+
+# Display a specified Technology profile, using jq to pretty print
+kubectl exec -i etcd-cluster-0000 -- etcdctl get service/voltha/technology_profiles/xgspon/64 | jq .
+
+etcdctl --endpoints=<ETCDIP>:2379 get  service/voltha/technology_profiles/xgspon/64
+# Example output
+service/voltha/technology_profiles/xgspon/64/uni-1
+{
+  "name": "4QueueHybridProfileMap1",
+  "profile_type": "XPON",
+  "version": 1,
+  "num_gem_ports": 4,
+  "instance_control": {
+    "onu": "multi-instance",
+    "uni": "single-instance",
+    "max_gem_payload_size": "auto"
+  },
+  "us_scheduler": {
+    "additional_bw": "auto",
+    "direction": "UPSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "ds_scheduler": {
+    "additional_bw": "auto",
+    "direction": "DOWNSTREAM",
+    "priority": 0,
+    "weight": 0,
+    "q_sched_policy": "hybrid"
+  },
+  "upstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "max_threshold": 0,
+        "min_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 75,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ],
+  "downstream_gem_port_attribute_list": [
+    {
+      "pbit_map": "0b00000101",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 4,
+      "weight": 10,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00011010",
+      "aes_encryption": "True",
+      "scheduling_policy": "WRR",
+      "priority_q": 3,
+      "weight": 90,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b00100000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 2,
+      "weight": 0,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    },
+    {
+      "pbit_map": "0b11000000",
+      "aes_encryption": "True",
+      "scheduling_policy": "StrictPriority",
+      "priority_q": 1,
+      "weight": 25,
+      "discard_policy": "TailDrop",
+      "max_q_size": "auto",
+      "discard_config": {
+        "min_threshold": 0,
+        "max_threshold": 0,
+        "max_probability": 0
+      }
+    }
+  ]
+}
+
+Deleting Technology Profiles
+A technology profile or a technology profile tree can be removed using etcdctl rm.
+
+# Remove a specific technology profile
+kubectl exec -i etcd-cluster-0000 -- etcdctl rm service/voltha/technology_profiles/xgspon/64
+
+# Remove all technology profiles associated with Technology xgspon and ID 64 (including the profile ID key)
+kubectl exec -i etcd-cluster-0000 -- etcdctl rm --dir -r service/voltha/technology_profiles/xgspon/64
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/config.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/config.go
new file mode 100644
index 0000000..4af2bd5
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/config.go
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package techprofile
+
+import (
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+)
+
+// tech profile default constants
+const (
+	defaultTechProfileName        = "Default_1tcont_1gem_Profile"
+	DEFAULT_TECH_PROFILE_TABLE_ID = 64
+	defaultVersion                = 1.0
+	defaultLogLevel               = 0
+	defaultGemportsCount          = 1
+	defaultPbits                  = "0b11111111"
+
+	defaultKVStoreTimeout = 5 //in seconds
+
+	// Tech profile path prefix in kv store
+	defaultKVPathPrefix = "service/voltha/technology_profiles"
+
+	// Tech profile path in kv store
+	defaultTechProfileKVPath = "%s/%d" // <technology>/<tech_profile_tableID>
+
+	// Tech profile instance path in kv store
+	// Format: <technology>/<tech_profile_tableID>/<uni_port_name>
+	defaultTPInstanceKVPath = "%s/%d/%s"
+)
+
+// Tech-Profile JSON String Keys
+// NOTE: The tech profile template JSON file should comply with the keys below
+const (
+	NAME                               = "name"
+	PROFILE_TYPE                       = "profile_type"
+	VERSION                            = "version"
+	NUM_GEM_PORTS                      = "num_gem_ports"
+	INSTANCE_CONTROL                   = "instance_control"
+	US_SCHEDULER                       = "us_scheduler"
+	DS_SCHEDULER                       = "ds_scheduler"
+	UPSTREAM_GEM_PORT_ATTRIBUTE_LIST   = "upstream_gem_port_attribute_list"
+	DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST = "downstream_gem_port_attribute_list"
+	ONU                                = "onu"
+	UNI                                = "uni"
+	MAX_GEM_PAYLOAD_SIZE               = "max_gem_payload_size"
+	DIRECTION                          = "direction"
+	ADDITIONAL_BW                      = "additional_bw"
+	PRIORITY                           = "priority"
+	Q_SCHED_POLICY                     = "q_sched_policy"
+	WEIGHT                             = "weight"
+	PBIT_MAP                           = "pbit_map"
+	DISCARD_CONFIG                     = "discard_config"
+	MAX_THRESHOLD                      = "max_threshold"
+	MIN_THRESHOLD                      = "min_threshold"
+	MAX_PROBABILITY                    = "max_probability"
+	DISCARD_POLICY                     = "discard_policy"
+	PRIORITY_Q                         = "priority_q"
+	SCHEDULING_POLICY                  = "scheduling_policy"
+	MAX_Q_SIZE                         = "max_q_size"
+	AES_ENCRYPTION                     = "aes_encryption"
+)
+
+// TechProfileFlags represents the set of configurations used
+type TechProfileFlags struct {
+	KVStoreHost          string
+	KVStorePort          int
+	KVStoreType          string
+	KVStoreTimeout       int
+	KVBackend            *db.Backend
+	TPKVPathPrefix       string
+	TPFileKVPath         string
+	TPInstanceKVPath     string
+	DefaultTPName        string
+	TPVersion            int
+	NumGemPorts          uint32
+	DefaultPbits         []string
+	LogLevel             int
+	DefaultTechProfileID uint32
+	DefaultNumGemPorts   uint32
+}
+
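+// NewTechProfileFlags returns a TechProfileFlags initialized with the package defaults and the given KV store type, host and port.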
+func NewTechProfileFlags(KVStoreType string, KVStoreHost string, KVStorePort int) *TechProfileFlags {
+	// initialize with default values
+	var techProfileFlags = TechProfileFlags{
+		KVBackend:            nil,
+		KVStoreHost:          KVStoreHost,
+		KVStorePort:          KVStorePort,
+		KVStoreType:          KVStoreType,
+		KVStoreTimeout:       defaultKVStoreTimeout,
+		DefaultTPName:        defaultTechProfileName,
+		TPKVPathPrefix:       defaultKVPathPrefix,
+		TPVersion:            defaultVersion,
+		TPFileKVPath:         defaultTechProfileKVPath,
+		TPInstanceKVPath:     defaultTPInstanceKVPath,
+		DefaultTechProfileID: DEFAULT_TECH_PROFILE_TABLE_ID,
+		DefaultNumGemPorts:   defaultGemportsCount,
+		DefaultPbits:         []string{defaultPbits},
+		LogLevel:             defaultLogLevel,
+	}
+
+	return &techProfileFlags
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go
new file mode 100644
index 0000000..ba8855f
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go
@@ -0,0 +1,923 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package techprofile
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
+)
+
+// Interface to pon resource manager APIs
+type iPonResourceMgr interface {
+	GetResourceID(ctx context.Context, IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
+	GetResourceTypeAllocID() string
+	GetResourceTypeGemPortID() string
+	GetTechnology() string
+}
+
+type Direction int32
+
+const (
+	Direction_UPSTREAM      Direction = 0
+	Direction_DOWNSTREAM    Direction = 1
+	Direction_BIDIRECTIONAL Direction = 2
+)
+
+var Direction_name = map[Direction]string{
+	0: "UPSTREAM",
+	1: "DOWNSTREAM",
+	2: "BIDIRECTIONAL",
+}
+
+type SchedulingPolicy int32
+
+const (
+	SchedulingPolicy_WRR            SchedulingPolicy = 0
+	SchedulingPolicy_StrictPriority SchedulingPolicy = 1
+	SchedulingPolicy_Hybrid         SchedulingPolicy = 2
+)
+
+var SchedulingPolicy_name = map[SchedulingPolicy]string{
+	0: "WRR",
+	1: "StrictPriority",
+	2: "Hybrid",
+}
+
+type AdditionalBW int32
+
+const (
+	AdditionalBW_AdditionalBW_None       AdditionalBW = 0
+	AdditionalBW_AdditionalBW_NA         AdditionalBW = 1
+	AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
+	AdditionalBW_AdditionalBW_Auto       AdditionalBW = 3
+)
+
+var AdditionalBW_name = map[AdditionalBW]string{
+	0: "AdditionalBW_None",
+	1: "AdditionalBW_NA",
+	2: "AdditionalBW_BestEffort",
+	3: "AdditionalBW_Auto",
+}
+
+type DiscardPolicy int32
+
+const (
+	DiscardPolicy_TailDrop  DiscardPolicy = 0
+	DiscardPolicy_WTailDrop DiscardPolicy = 1
+	DiscardPolicy_Red       DiscardPolicy = 2
+	DiscardPolicy_WRed      DiscardPolicy = 3
+)
+
+var DiscardPolicy_name = map[DiscardPolicy]string{
+	0: "TailDrop",
+	1: "WTailDrop",
+	2: "Red",
+	3: "WRed",
+}
+
+// Required uniPortName format
+var uniPortNameFormat = regexp.MustCompile(`^pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}$`)
+
+/*
+type InferredAdditionBWIndication int32
+
+const (
+	InferredAdditionBWIndication_InferredAdditionBWIndication_None       InferredAdditionBWIndication = 0
+	InferredAdditionBWIndication_InferredAdditionBWIndication_Assured    InferredAdditionBWIndication = 1
+	InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort InferredAdditionBWIndication = 2
+)
+
+var InferredAdditionBWIndication_name = map[int32]string{
+	0: "InferredAdditionBWIndication_None",
+	1: "InferredAdditionBWIndication_Assured",
+	2: "InferredAdditionBWIndication_BestEffort",
+}
+*/
+// instance control defaults
+const (
+	defaultOnuInstance    = "multi-instance"
+	defaultUniInstance    = "single-instance"
+	defaultGemPayloadSize = "auto"
+)
+
+const MAX_GEM_PAYLOAD = "max_gem_payload_size"
+
+type InstanceControl struct {
+	Onu               string `json:"ONU"`
+	Uni               string `json:"uni"`
+	MaxGemPayloadSize string `json:"max_gem_payload_size"`
+}
+
+// default discard config constants
+const (
+	defaultMinThreshold   = 0
+	defaultMaxThreshold   = 0
+	defaultMaxProbability = 0
+)
+
+type DiscardConfig struct {
+	MinThreshold   int `json:"min_threshold"`
+	MaxThreshold   int `json:"max_threshold"`
+	MaxProbability int `json:"max_probability"`
+}
+
+// default scheduler constants
+const (
+	defaultAdditionalBw     = AdditionalBW_AdditionalBW_BestEffort
+	defaultPriority         = 0
+	defaultWeight           = 0
+	defaultQueueSchedPolicy = SchedulingPolicy_Hybrid
+)
+
+type Scheduler struct {
+	Direction    string `json:"direction"`
+	AdditionalBw string `json:"additional_bw"`
+	Priority     uint32 `json:"priority"`
+	Weight       uint32 `json:"weight"`
+	QSchedPolicy string `json:"q_sched_policy"`
+}
+
+// default GEM attribute constants
+const (
+	defaultAESEncryption     = "True"
+	defaultPriorityQueue     = 0
+	defaultQueueWeight       = 0
+	defaultMaxQueueSize      = "auto"
+	defaultdropPolicy        = DiscardPolicy_TailDrop
+	defaultSchedulePolicy    = SchedulingPolicy_WRR
+	defaultIsMulticast       = "False"
+	defaultAccessControlList = "224.0.0.0-239.255.255.255"
+	defaultMcastGemID        = 4069
+)
+
+type GemPortAttribute struct {
+	MaxQueueSize     string        `json:"max_q_size"`
+	PbitMap          string        `json:"pbit_map"`
+	AesEncryption    string        `json:"aes_encryption"`
+	SchedulingPolicy string        `json:"scheduling_policy"`
+	PriorityQueue    uint32        `json:"priority_q"`
+	Weight           uint32        `json:"weight"`
+	DiscardPolicy    string        `json:"discard_policy"`
+	DiscardConfig    DiscardConfig `json:"discard_config"`
+	IsMulticast      string        `json:"is_multicast"`
+	DControlList     string        `json:"dynamic_access_control_list"`
+	SControlList     string        `json:"static_access_control_list"`
+	McastGemID       uint32        `json:"multicast_gem_id"`
+}
+
+type iScheduler struct {
+	AllocID      uint32 `json:"alloc_id"`
+	Direction    string `json:"direction"`
+	AdditionalBw string `json:"additional_bw"`
+	Priority     uint32 `json:"priority"`
+	Weight       uint32 `json:"weight"`
+	QSchedPolicy string `json:"q_sched_policy"`
+}
+type iGemPortAttribute struct {
+	GemportID        uint32        `json:"gemport_id"`
+	MaxQueueSize     string        `json:"max_q_size"`
+	PbitMap          string        `json:"pbit_map"`
+	AesEncryption    string        `json:"aes_encryption"`
+	SchedulingPolicy string        `json:"scheduling_policy"`
+	PriorityQueue    uint32        `json:"priority_q"`
+	Weight           uint32        `json:"weight"`
+	DiscardPolicy    string        `json:"discard_policy"`
+	DiscardConfig    DiscardConfig `json:"discard_config"`
+	IsMulticast      string        `json:"is_multicast"`
+	DControlList     string        `json:"dynamic_access_control_list"`
+	SControlList     string        `json:"static_access_control_list"`
+	McastGemID       uint32        `json:"multicast_gem_id"`
+}
+
+type TechProfileMgr struct {
+	config      *TechProfileFlags
+	resourceMgr iPonResourceMgr
+}
+type DefaultTechProfile struct {
+	Name                           string             `json:"name"`
+	ProfileType                    string             `json:"profile_type"`
+	Version                        int                `json:"version"`
+	NumGemPorts                    uint32             `json:"num_gem_ports"`
+	InstanceCtrl                   InstanceControl    `json:"instance_control"`
+	UsScheduler                    Scheduler          `json:"us_scheduler"`
+	DsScheduler                    Scheduler          `json:"ds_scheduler"`
+	UpstreamGemPortAttributeList   []GemPortAttribute `json:"upstream_gem_port_attribute_list"`
+	DownstreamGemPortAttributeList []GemPortAttribute `json:"downstream_gem_port_attribute_list"`
+}
+type TechProfile struct {
+	Name                           string              `json:"name"`
+	SubscriberIdentifier           string              `json:"subscriber_identifier"`
+	ProfileType                    string              `json:"profile_type"`
+	Version                        int                 `json:"version"`
+	NumGemPorts                    uint32              `json:"num_gem_ports"`
+	InstanceCtrl                   InstanceControl     `json:"instance_control"`
+	UsScheduler                    iScheduler          `json:"us_scheduler"`
+	DsScheduler                    iScheduler          `json:"ds_scheduler"`
+	UpstreamGemPortAttributeList   []iGemPortAttribute `json:"upstream_gem_port_attribute_list"`
+	DownstreamGemPortAttributeList []iGemPortAttribute `json:"downstream_gem_port_attribute_list"`
+}
+
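+// SetKVClient creates a KV client for the configured store type, host and port and wraps it in a db.Backend rooted at the tech profile KV path prefix. It returns nil if the client cannot be created.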
+func (t *TechProfileMgr) SetKVClient() *db.Backend {
+	addr := t.config.KVStoreHost + ":" + strconv.Itoa(t.config.KVStorePort)
+	kvClient, err := newKVClient(t.config.KVStoreType, addr, t.config.KVStoreTimeout)
+	if err != nil {
+		log.Errorw("failed-to-create-kv-client",
+			log.Fields{
+				"type": t.config.KVStoreType, "host": t.config.KVStoreHost, "port": t.config.KVStorePort,
+				"timeout": t.config.KVStoreTimeout, "prefix": t.config.TPKVPathPrefix,
+				"error": err.Error(),
+			})
+		return nil
+	}
+	return &db.Backend{
+		Client:     kvClient,
+		StoreType:  t.config.KVStoreType,
+		Host:       t.config.KVStoreHost,
+		Port:       t.config.KVStorePort,
+		Timeout:    t.config.KVStoreTimeout,
+		PathPrefix: t.config.TPKVPathPrefix}
+
+	/* TODO: Make sure a direct call to NewBackend works fine with the backend; currently there is some
+	   issue between the kv store and the backend, and the core is not calling NewBackend directly.
+	   kv := model.NewBackend(t.config.KVStoreType, t.config.KVStoreHost, t.config.KVStorePort,
+	                          t.config.KVStoreTimeout, kvStoreTechProfilePathPrefix)
+	*/
+}
+
+func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
+
+	log.Infow("kv-store", log.Fields{"storeType": storeType, "address": address})
+	switch storeType {
+	case "consul":
+		return kvstore.NewConsulClient(address, timeout)
+	case "etcd":
+		return kvstore.NewEtcdClient(address, timeout)
+	}
+	return nil, errors.New("unsupported-kv-store")
+}
+
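+// NewTechProfile returns a TechProfileMgr bound to the given PON resource manager, with its KV backend initialized from the supplied KV store parameters.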
+func NewTechProfile(resourceMgr iPonResourceMgr, KVStoreType string, KVStoreHost string, KVStorePort int) (*TechProfileMgr, error) {
+	var techprofileObj TechProfileMgr
+	log.Debug("Initializing techprofile Manager")
+	techprofileObj.config = NewTechProfileFlags(KVStoreType, KVStoreHost, KVStorePort)
+	techprofileObj.config.KVBackend = techprofileObj.SetKVClient()
+	if techprofileObj.config.KVBackend == nil {
+		log.Error("Failed to initialize KV backend\n")
+		return nil, errors.New("KV backend init failed")
+	}
+	techprofileObj.resourceMgr = resourceMgr
+	log.Debug("Initializing techprofile object instance success")
+	return &techprofileObj, nil
+}
+
+func (t *TechProfileMgr) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string {
+	return fmt.Sprintf(t.config.TPInstanceKVPath, t.resourceMgr.GetTechnology(), techProfiletblID, uniPortName)
+}
+
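+// GetTPInstanceFromKVStore fetches the tech profile instance stored at the given KV path. It returns nil (with a nil error) when no instance exists at that path.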
+func (t *TechProfileMgr) GetTPInstanceFromKVStore(ctx context.Context, techProfiletblID uint32, path string) (*TechProfile, error) {
+	var KvTpIns TechProfile
+	var resPtr *TechProfile = &KvTpIns
+	var err error
+	var kvResult *kvstore.KVPair
+
+	kvResult, _ = t.config.KVBackend.Get(ctx, path)
+	if kvResult == nil {
+		log.Infow("tp-instance-not-found-on-kv", log.Fields{"key": path})
+		return nil, nil
+	} else {
+		if value, err := kvstore.ToByte(kvResult.Value); err == nil {
+			if err = json.Unmarshal(value, resPtr); err != nil {
+				log.Errorw("error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
+				return nil, errors.New("error-unmarshal-kv-result")
+			} else {
+				return resPtr, nil
+			}
+		}
+	}
+	return nil, err
+}
+
+func (t *TechProfileMgr) addTechProfInstanceToKVStore(ctx context.Context, techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
+	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	log.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
+	tpInstanceJson, err := json.Marshal(*tpInstance)
+	if err == nil {
+		// Backend will convert JSON byte array into string format
+		log.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
+		err = t.config.KVBackend.Put(ctx, path, tpInstanceJson)
+	} else {
+		log.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
+	}
+	return err
+}
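+
+// getTPFromKVStore reads the tech profile template for the given table ID from the KV store, returning nil if the template is missing or cannot be decoded.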
+func (t *TechProfileMgr) getTPFromKVStore(ctx context.Context, techProfiletblID uint32) *DefaultTechProfile {
+	var kvtechprofile DefaultTechProfile
+	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
+	log.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
+	kvresult, err := t.config.KVBackend.Get(ctx, key)
+	if err != nil {
+		log.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
+		return nil
+	}
+	if kvresult != nil {
+		/* Backend will return the value in string format; it needs to be converted to []byte before unmarshalling */
+		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
+			if err = json.Unmarshal(value, &kvtechprofile); err != nil {
+				log.Errorw("Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
+				return nil
+			}
+
+			log.Debugw("Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
+			return &kvtechprofile
+		}
+	}
+	return nil
+}
+
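+// CreateTechProfInstance creates a tech profile instance for the given UNI port from the template found on the KV store (or from the default template), allocates the required alloc and gemport IDs, and persists the instance back to the KV store.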
+func (t *TechProfileMgr) CreateTechProfInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfId uint32) (*TechProfile, error) {
+	var tpInstance *TechProfile
+	log.Infow("creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
+
+	// Make sure the uniPortName is as per format pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
+	if !uniPortNameFormat.Match([]byte(uniPortName)) {
+		log.Errorw("uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
+		return nil, errors.New("uni-port-name-not-confirming-to-format")
+	}
+
+	tp := t.getTPFromKVStore(ctx, techProfiletblID)
+	if tp != nil {
+		if err := t.validateInstanceControlAttr(tp.InstanceCtrl); err != nil {
+			log.Error("invalid-instance-ctrl-attr--using-default-tp")
+			tp = t.getDefaultTechProfile()
+		} else {
+			log.Infow("using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
+		}
+	} else {
+		log.Info("tp-not-found-on-kv--creating-default-tp")
+		tp = t.getDefaultTechProfile()
+	}
+	tpInstancePath := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	if tpInstance = t.allocateTPInstance(ctx, uniPortName, tp, intfId, tpInstancePath); tpInstance == nil {
+		log.Error("tp-intance-allocation-failed")
+		return nil, errors.New("tp-intance-allocation-failed")
+	}
+	if err := t.addTechProfInstanceToKVStore(ctx, techProfiletblID, uniPortName, tpInstance); err != nil {
+		log.Errorw("error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
+		return nil, errors.New("error-adding-tp-to-kv-store")
+	}
+	log.Infow("tp-added-to-kv-store-successfully",
+		log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
+	return tpInstance, nil
+}
+
+func (t *TechProfileMgr) DeleteTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string) error {
+	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	return t.config.KVBackend.Delete(ctx, path)
+}
+
+func (t *TechProfileMgr) validateInstanceControlAttr(instCtl InstanceControl) error {
+	if instCtl.Onu != "single-instance" && instCtl.Onu != "multi-instance" {
+		log.Errorw("invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
+		return errors.New("invalid-onu-instance-ctl-attr")
+	}
+
+	if instCtl.Uni != "single-instance" && instCtl.Uni != "multi-instance" {
+		log.Errorw("invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
+		return errors.New("invalid-uni-instance-ctl-attr")
+	}
+
+	if instCtl.Uni == "multi-instance" {
+		log.Error("uni-multi-instance-tp-not-supported")
+		return errors.New("uni-multi-instance-tp-not-supported")
+	}
+
+	return nil
+}
+
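+// allocateTPInstance builds a TechProfile instance from the template: it obtains an alloc ID (reusing the one from an existing single-instance TP where applicable) and the gemport IDs from the resource manager, and fills in the upstream and downstream GEM port attribute lists.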
+func (t *TechProfileMgr) allocateTPInstance(ctx context.Context, uniPortName string, tp *DefaultTechProfile, intfId uint32, tpInstPath string) *TechProfile {
+
+	var usGemPortAttributeList []iGemPortAttribute
+	var dsGemPortAttributeList []iGemPortAttribute
+	var dsMulticastGemAttributeList []iGemPortAttribute
+	var dsUnicastGemAttributeList []iGemPortAttribute
+	var tcontIDs []uint32
+	var gemPorts []uint32
+	var err error
+
+	log.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
+
+	if tp.InstanceCtrl.Onu == "multi-instance" {
+		if tcontIDs, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
+			log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			return nil
+		}
+	} else { // "single-instance"
+		if tpInst, err := t.getSingleInstanceTp(ctx, tpInstPath); err != nil {
+			log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			return nil
+		} else if tpInst == nil {
+			// No "single-instance" tp found on one any uni port for the given TP ID
+			// Allocate a new TcontID or AllocID
+			if tcontIDs, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
+				log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+				return nil
+			}
+		} else {
+			// Use the alloc-id from the existing TpInstance
+			tcontIDs = append(tcontIDs, tpInst.UsScheduler.AllocID)
+		}
+	}
+	log.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
+	if gemPorts, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
+		log.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
+		return nil
+	}
+	log.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+	for index := 0; index < int(tp.NumGemPorts); index++ {
+		usGemPortAttributeList = append(usGemPortAttributeList,
+			iGemPortAttribute{GemportID: gemPorts[index],
+				MaxQueueSize:     tp.UpstreamGemPortAttributeList[index].MaxQueueSize,
+				PbitMap:          tp.UpstreamGemPortAttributeList[index].PbitMap,
+				AesEncryption:    tp.UpstreamGemPortAttributeList[index].AesEncryption,
+				SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    tp.UpstreamGemPortAttributeList[index].PriorityQueue,
+				Weight:           tp.UpstreamGemPortAttributeList[index].Weight,
+				DiscardPolicy:    tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
+				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
+	}
+
+	log.Info("length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
+	//put multicast and unicast downstream GEM port attributes in different lists first
+	for index := 0; index < int(len(tp.DownstreamGemPortAttributeList)); index++ {
+		if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
+			dsMulticastGemAttributeList = append(dsMulticastGemAttributeList,
+				iGemPortAttribute{
+					McastGemID:       tp.DownstreamGemPortAttributeList[index].McastGemID,
+					MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
+					PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
+					AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
+					SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+					PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
+					Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
+					DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+					DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig,
+					IsMulticast:      tp.DownstreamGemPortAttributeList[index].IsMulticast,
+					DControlList:     tp.DownstreamGemPortAttributeList[index].DControlList,
+					SControlList:     tp.DownstreamGemPortAttributeList[index].SControlList})
+		} else {
+			dsUnicastGemAttributeList = append(dsUnicastGemAttributeList,
+				iGemPortAttribute{
+					MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
+					PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
+					AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
+					SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
+					PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
+					Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
+					DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
+					DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
+		}
+	}
+	//add unicast downstream GEM ports to dsGemPortAttributeList
+	for index := 0; index < int(tp.NumGemPorts); index++ {
+		dsGemPortAttributeList = append(dsGemPortAttributeList,
+			iGemPortAttribute{GemportID: gemPorts[index],
+				MaxQueueSize:     dsUnicastGemAttributeList[index].MaxQueueSize,
+				PbitMap:          dsUnicastGemAttributeList[index].PbitMap,
+				AesEncryption:    dsUnicastGemAttributeList[index].AesEncryption,
+				SchedulingPolicy: dsUnicastGemAttributeList[index].SchedulingPolicy,
+				PriorityQueue:    dsUnicastGemAttributeList[index].PriorityQueue,
+				Weight:           dsUnicastGemAttributeList[index].Weight,
+				DiscardPolicy:    dsUnicastGemAttributeList[index].DiscardPolicy,
+				DiscardConfig:    dsUnicastGemAttributeList[index].DiscardConfig})
+	}
+	//add multicast GEM ports to dsGemPortAttributeList afterwards
+	for k := range dsMulticastGemAttributeList {
+		dsGemPortAttributeList = append(dsGemPortAttributeList, dsMulticastGemAttributeList[k])
+	}
+
+	return &TechProfile{
+		SubscriberIdentifier: uniPortName,
+		Name:                 tp.Name,
+		ProfileType:          tp.ProfileType,
+		Version:              tp.Version,
+		NumGemPorts:          tp.NumGemPorts,
+		InstanceCtrl:         tp.InstanceCtrl,
+		UsScheduler: iScheduler{
+			AllocID:      tcontIDs[0],
+			Direction:    tp.UsScheduler.Direction,
+			AdditionalBw: tp.UsScheduler.AdditionalBw,
+			Priority:     tp.UsScheduler.Priority,
+			Weight:       tp.UsScheduler.Weight,
+			QSchedPolicy: tp.UsScheduler.QSchedPolicy},
+		DsScheduler: iScheduler{
+			AllocID:      tcontIDs[0],
+			Direction:    tp.DsScheduler.Direction,
+			AdditionalBw: tp.DsScheduler.AdditionalBw,
+			Priority:     tp.DsScheduler.Priority,
+			Weight:       tp.DsScheduler.Weight,
+			QSchedPolicy: tp.DsScheduler.QSchedPolicy},
+		UpstreamGemPortAttributeList:   usGemPortAttributeList,
+		DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
+
+// getSingleInstanceTp returns another TpInstance for an ONU on a different
+// uni port for the same TP ID, if it finds one, else nil.
+func (t *TechProfileMgr) getSingleInstanceTp(ctx context.Context, tpPath string) (*TechProfile, error) {
+	var tpInst TechProfile
+
+	// For example:
+	// tpPath like "service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}/uni-{1}"
+	// is broken into ["service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}" ""]
+	uniPathSlice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPath, 2)
+	kvPairs, _ := t.config.KVBackend.List(ctx, uniPathSlice[0])
+
+	// Find a valid TP Instance among all the UNIs of that ONU for the given TP ID
+	for keyPath, kvPair := range kvPairs {
+		if value, err := kvstore.ToByte(kvPair.Value); err == nil {
+			if err = json.Unmarshal(value, &tpInst); err != nil {
+				log.Errorw("error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
+				return nil, errors.New("error-unmarshal-kv-pair")
+			} else {
+				log.Debugw("found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
+				return &tpInst, nil
+			}
+		}
+	}
+	return nil, nil
+}
+
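+// getDefaultTechProfile constructs a default tech profile template from the configured defaults, with one GEM port per configured default p-bit map.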
+func (t *TechProfileMgr) getDefaultTechProfile() *DefaultTechProfile {
+
+	var usGemPortAttributeList []GemPortAttribute
+	var dsGemPortAttributeList []GemPortAttribute
+
+	for _, pbit := range t.config.DefaultPbits {
+		log.Debugw("Creating GEM port", log.Fields{"pbit": pbit})
+		usGemPortAttributeList = append(usGemPortAttributeList,
+			GemPortAttribute{
+				MaxQueueSize:     defaultMaxQueueSize,
+				PbitMap:          pbit,
+				AesEncryption:    defaultAESEncryption,
+				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
+				PriorityQueue:    defaultPriorityQueue,
+				Weight:           defaultQueueWeight,
+				DiscardPolicy:    DiscardPolicy_name[defaultdropPolicy],
+				DiscardConfig: DiscardConfig{
+					MinThreshold:   defaultMinThreshold,
+					MaxThreshold:   defaultMaxThreshold,
+					MaxProbability: defaultMaxProbability}})
+		dsGemPortAttributeList = append(dsGemPortAttributeList,
+			GemPortAttribute{
+				MaxQueueSize:     defaultMaxQueueSize,
+				PbitMap:          pbit,
+				AesEncryption:    defaultAESEncryption,
+				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
+				PriorityQueue:    defaultPriorityQueue,
+				Weight:           defaultQueueWeight,
+				DiscardPolicy:    DiscardPolicy_name[defaultdropPolicy],
+				DiscardConfig: DiscardConfig{
+					MinThreshold:   defaultMinThreshold,
+					MaxThreshold:   defaultMaxThreshold,
+					MaxProbability: defaultMaxProbability},
+				IsMulticast:  defaultIsMulticast,
+				DControlList: defaultAccessControlList,
+				SControlList: defaultAccessControlList,
+				McastGemID:   defaultMcastGemID})
+	}
+	return &DefaultTechProfile{
+		Name:        t.config.DefaultTPName,
+		ProfileType: t.resourceMgr.GetTechnology(),
+		Version:     t.config.TPVersion,
+		NumGemPorts: uint32(len(usGemPortAttributeList)),
+		InstanceCtrl: InstanceControl{
+			Onu:               defaultOnuInstance,
+			Uni:               defaultUniInstance,
+			MaxGemPayloadSize: defaultGemPayloadSize},
+		UsScheduler: Scheduler{
+			Direction:    Direction_name[Direction_UPSTREAM],
+			AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
+			Priority:     defaultPriority,
+			Weight:       defaultWeight,
+			QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
+		DsScheduler: Scheduler{
+			Direction:    Direction_name[Direction_DOWNSTREAM],
+			AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
+			Priority:     defaultPriority,
+			Weight:       defaultWeight,
+			QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
+		UpstreamGemPortAttributeList:   usGemPortAttributeList,
+		DownstreamGemPortAttributeList: dsGemPortAttributeList}
+}
+
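+// GetprotoBufParamValue maps a tech profile string value of the given parameter type (direction, discard_policy, sched_policy or additional_bw) to its protobuf enum value, returning -1 when no match is found.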
+func (t *TechProfileMgr) GetprotoBufParamValue(paramType string, paramKey string) int32 {
+	var result int32 = -1
+
+	if paramType == "direction" {
+		for key, val := range tp_pb.Direction_value {
+			if key == paramKey {
+				result = val
+			}
+		}
+	} else if paramType == "discard_policy" {
+		for key, val := range tp_pb.DiscardPolicy_value {
+			if key == paramKey {
+				result = val
+			}
+		}
+	} else if paramType == "sched_policy" {
+		for key, val := range tp_pb.SchedulingPolicy_value {
+			if key == paramKey {
+				log.Debugw("Got value in proto", log.Fields{"key": key, "value": val})
+				result = val
+			}
+		}
+	} else if paramType == "additional_bw" {
+		for key, val := range tp_pb.AdditionalBW_value {
+			if key == paramKey {
+				result = val
+			}
+		}
+	} else {
+		log.Error("Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
+		return -1
+	}
+	log.Debugw("Got value in proto", log.Fields{"key": paramKey, "value": result})
+	return result
+}
+
+func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
+	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction))
+	if dir == -1 {
+		log.Errorf("Error in getting proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
+		return nil, fmt.Errorf("unable to get proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
+	}
+
+	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw))
+	if bw == -1 {
+		log.Errorf("Error in getting proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
+		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
+	}
+
+	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy))
+	if policy == -1 {
+		log.Errorf("Error in getting proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
+		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
+	}
+
+	return &tp_pb.SchedulerConfig{
+		Direction:    dir,
+		AdditionalBw: bw,
+		Priority:     tpInstance.UsScheduler.Priority,
+		Weight:       tpInstance.UsScheduler.Weight,
+		SchedPolicy:  policy}, nil
+}
+
+func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
+
+	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction))
+	if dir == -1 {
+		log.Errorf("Error in getting proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
+		return nil, fmt.Errorf("unable to get proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
+	}
+
+	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw))
+	if bw == -1 {
+		log.Errorf("Error in getting proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
+		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
+	}
+
+	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy))
+	if policy == -1 {
+		log.Errorf("Error in getting proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
+		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
+	}
+
+	return &tp_pb.SchedulerConfig{
+		Direction:    dir,
+		AdditionalBw: bw,
+		Priority:     tpInstance.DsScheduler.Priority,
+		Weight:       tpInstance.DsScheduler.Weight,
+		SchedPolicy:  policy}, nil
+}
+
+func (t *TechProfileMgr) GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
+	ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler {
+
+	tSched := &tp_pb.TrafficScheduler{
+		Direction:          SchedCfg.Direction,
+		AllocId:            tpInstance.UsScheduler.AllocID,
+		TrafficShapingInfo: ShapingCfg,
+		Scheduler:          SchedCfg}
+
+	return tSched
+}
+
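+// GetTrafficQueues builds the list of traffic queues for the given direction from the corresponding GEM port attribute list of the tech profile instance; multicast GEM ports are skipped in the downstream direction.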
+func (tpm *TechProfileMgr) GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
+
+	var encryp bool
+	if Dir == tp_pb.Direction_UPSTREAM {
+		// upstream GEM ports
+		NumGemPorts := len(tp.UpstreamGemPortAttributeList)
+		GemPorts := make([]*tp_pb.TrafficQueue, 0)
+		for Count := 0; Count < NumGemPorts; Count++ {
+			if tp.UpstreamGemPortAttributeList[Count].AesEncryption == "True" {
+				encryp = true
+			} else {
+				encryp = false
+			}
+
+			schedPolicy := tpm.GetprotoBufParamValue("sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
+			if schedPolicy == -1 {
+				log.Errorf("Error in getting Proto Id for scheduling policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
+				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
+			}
+
+			discardPolicy := tpm.GetprotoBufParamValue("discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
+			if discardPolicy == -1 {
+				log.Errorf("Error in getting Proto Id for discard policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy, Count)
+				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
+			}
+
+			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
+				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.UsScheduler.Direction)),
+				GemportId:     tp.UpstreamGemPortAttributeList[Count].GemportID,
+				PbitMap:       tp.UpstreamGemPortAttributeList[Count].PbitMap,
+				AesEncryption: encryp,
+				SchedPolicy:   tp_pb.SchedulingPolicy(schedPolicy),
+				Priority:      tp.UpstreamGemPortAttributeList[Count].PriorityQueue,
+				Weight:        tp.UpstreamGemPortAttributeList[Count].Weight,
+				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
+			})
+		}
+		log.Debugw("Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		return GemPorts, nil
+	} else if Dir == tp_pb.Direction_DOWNSTREAM {
+		//downstream GEM ports
+		NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+		GemPorts := make([]*tp_pb.TrafficQueue, 0)
+		for Count := 0; Count < NumGemPorts; Count++ {
+			if isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
+				//do not take multicast GEM ports. They are handled separately.
+				continue
+			}
+			if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
+				encryp = true
+			} else {
+				encryp = false
+			}
+
+			schedPolicy := tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
+			if schedPolicy == -1 {
+				log.Errorf("Error in getting Proto Id for scheduling policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
+				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
+			}
+
+			discardPolicy := tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
+			if discardPolicy == -1 {
+				log.Errorf("Error in getting Proto Id for discard policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy, Count)
+				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
+			}
+
+			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
+				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
+				GemportId:     tp.DownstreamGemPortAttributeList[Count].GemportID,
+				PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
+				AesEncryption: encryp,
+				SchedPolicy:   tp_pb.SchedulingPolicy(schedPolicy),
+				Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
+				Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
+				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
+			})
+		}
+		log.Debugw("Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		return GemPorts, nil
+	}
+
+	log.Errorf("Unsupported direction %s used for generating Traffic Queue list", Dir)
+	return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unsupported direction %s", Dir)
+}
+
+//isMulticastGem returns true if isMulticast attribute value of a GEM port is true; false otherwise
+func isMulticastGem(isMulticastAttrValue string) bool {
+	return isMulticastAttrValue != "" &&
+		(isMulticastAttrValue == "True" || isMulticastAttrValue == "true" || isMulticastAttrValue == "TRUE")
+}
+
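+// GetMulticastTrafficQueues builds traffic queues for the multicast GEM ports found in the downstream GEM port attribute list.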
+func (tpm *TechProfileMgr) GetMulticastTrafficQueues(tp *TechProfile) []*tp_pb.TrafficQueue {
+	var encryp bool
+	NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+	mcastTrafficQueues := make([]*tp_pb.TrafficQueue, 0)
+	for Count := 0; Count < NumGemPorts; Count++ {
+		if !isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
+			continue
+		}
+		if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
+			encryp = true
+		} else {
+			encryp = false
+		}
+		mcastTrafficQueues = append(mcastTrafficQueues, &tp_pb.TrafficQueue{
+			Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
+			GemportId:     tp.DownstreamGemPortAttributeList[Count].McastGemID,
+			PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
+			AesEncryption: encryp,
+			SchedPolicy:   tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
+			Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
+			Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
+			DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
+		})
+	}
+	log.Debugw("Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
+	return mcastTrafficQueues
+}
+
+func (tpm *TechProfileMgr) GetUsTrafficScheduler(tp *TechProfile) *tp_pb.TrafficScheduler {
+	UsScheduler, _ := tpm.GetUsScheduler(tp)
+
+	return &tp_pb.TrafficScheduler{Direction: UsScheduler.Direction,
+		AllocId:   tp.UsScheduler.AllocID,
+		Scheduler: UsScheduler}
+}
+
+func (t *TechProfileMgr) GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32 {
+	/*
+	   Function to get the Gemport ID mapped to a pbit.
+	*/
+	if Dir == tp_pb.Direction_UPSTREAM {
+		// upstream GEM ports
+		NumGemPorts := len(tp.UpstreamGemPortAttributeList)
+		for Count := 0; Count < NumGemPorts; Count++ {
+			NumPbitMaps := len(tp.UpstreamGemPortAttributeList[Count].PbitMap)
+			for ICount := 2; ICount < NumPbitMaps; ICount++ {
+				if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[Count].PbitMap[ICount])); err == nil {
+					if uint32(ICount-2) == pbit && p == 1 { // Check this p-bit is set
+						log.Debugw("Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[Count].GemportID})
+						return tp.UpstreamGemPortAttributeList[Count].GemportID
+					}
+				}
+			}
+		}
+	} else if Dir == tp_pb.Direction_DOWNSTREAM {
+		//downstream GEM ports
+		NumGemPorts := len(tp.DownstreamGemPortAttributeList)
+		for Count := 0; Count < NumGemPorts; Count++ {
+			NumPbitMaps := len(tp.DownstreamGemPortAttributeList[Count].PbitMap)
+			for ICount := 2; ICount < NumPbitMaps; ICount++ {
+				if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[Count].PbitMap[ICount])); err == nil {
+					if uint32(ICount-2) == pbit && p == 1 { // Check this p-bit is set
+						log.Debugw("Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[Count].GemportID})
+						return tp.DownstreamGemPortAttributeList[Count].GemportID
+					}
+				}
+			}
+		}
+	}
+	log.Errorw("No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+	return 0
+}
+
+// FindAllTpInstances returns all TechProfile instances for a given TechProfile table-id, pon interface ID and onu ID.
+func (t *TechProfileMgr) FindAllTpInstances(ctx context.Context, techProfiletblID uint32, ponIntf uint32, onuID uint32) []TechProfile {
+	var tp TechProfile
+	onuTpInstancePath := fmt.Sprintf("%s/%d/pon-{%d}/onu-{%d}", t.resourceMgr.GetTechnology(), techProfiletblID, ponIntf, onuID)
+
+	if kvPairs, _ := t.config.KVBackend.List(ctx, onuTpInstancePath); kvPairs != nil {
+		tpInstances := make([]TechProfile, 0, len(kvPairs))
+		for kvPath, kvPair := range kvPairs {
+			if value, err := kvstore.ToByte(kvPair.Value); err == nil {
+				if err = json.Unmarshal(value, &tp); err != nil {
+					log.Errorw("error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
+					continue
+				} else {
+					tpInstances = append(tpInstances, tp)
+				}
+			}
+		}
+		return tpInstances
+	}
+	return nil
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go
new file mode 100644
index 0000000..e605d49
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package techprofile
+
+import (
+	"context"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
+)
+
+type TechProfileIf interface {
+	SetKVClient() *db.Backend
+	GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string
+	GetTPInstanceFromKVStore(ctx context.Context, techProfiletblID uint32, path string) (*TechProfile, error)
+	CreateTechProfInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfId uint32) (*TechProfile, error)
+	DeleteTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string) error
+	GetprotoBufParamValue(paramType string, paramKey string) int32
+	GetUsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
+	GetDsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
+	GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
+		ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
+	GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
+	GetMulticastTrafficQueues(tp *TechProfile) []*tp_pb.TrafficQueue
+	GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32
+	FindAllTpInstances(ctx context.Context, techProfiletblID uint32, ponIntf uint32, onuID uint32) []TechProfile
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/openolt/openolt.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/openolt/openolt.pb.go
new file mode 100644
index 0000000..c22549c
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/openolt/openolt.pb.go
@@ -0,0 +1,4592 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/openolt.proto
+
+package openolt
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	tech_profile "github.com/opencord/voltha-protos/v3/go/tech_profile"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	grpc "google.golang.org/grpc"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// SchedulerConfig from public import voltha_protos/tech_profile.proto
+type SchedulerConfig = tech_profile.SchedulerConfig
+
+// TrafficShapingInfo from public import voltha_protos/tech_profile.proto
+type TrafficShapingInfo = tech_profile.TrafficShapingInfo
+
+// TrafficScheduler from public import voltha_protos/tech_profile.proto
+type TrafficScheduler = tech_profile.TrafficScheduler
+
+// TrafficSchedulers from public import voltha_protos/tech_profile.proto
+type TrafficSchedulers = tech_profile.TrafficSchedulers
+
+// TailDropDiscardConfig from public import voltha_protos/tech_profile.proto
+type TailDropDiscardConfig = tech_profile.TailDropDiscardConfig
+
+// RedDiscardConfig from public import voltha_protos/tech_profile.proto
+type RedDiscardConfig = tech_profile.RedDiscardConfig
+
+// WRedDiscardConfig from public import voltha_protos/tech_profile.proto
+type WRedDiscardConfig = tech_profile.WRedDiscardConfig
+
+// DiscardConfig from public import voltha_protos/tech_profile.proto
+type DiscardConfig = tech_profile.DiscardConfig
+type DiscardConfig_TailDropDiscardConfig = tech_profile.DiscardConfig_TailDropDiscardConfig
+type DiscardConfig_RedDiscardConfig = tech_profile.DiscardConfig_RedDiscardConfig
+type DiscardConfig_WredDiscardConfig = tech_profile.DiscardConfig_WredDiscardConfig
+
+// TrafficQueue from public import voltha_protos/tech_profile.proto
+type TrafficQueue = tech_profile.TrafficQueue
+
+// TrafficQueues from public import voltha_protos/tech_profile.proto
+type TrafficQueues = tech_profile.TrafficQueues
+
+// Direction from public import voltha_protos/tech_profile.proto
+type Direction = tech_profile.Direction
+
+var Direction_name = tech_profile.Direction_name
+var Direction_value = tech_profile.Direction_value
+
+const Direction_UPSTREAM = Direction(tech_profile.Direction_UPSTREAM)
+const Direction_DOWNSTREAM = Direction(tech_profile.Direction_DOWNSTREAM)
+const Direction_BIDIRECTIONAL = Direction(tech_profile.Direction_BIDIRECTIONAL)
+
+// SchedulingPolicy from public import voltha_protos/tech_profile.proto
+type SchedulingPolicy = tech_profile.SchedulingPolicy
+
+var SchedulingPolicy_name = tech_profile.SchedulingPolicy_name
+var SchedulingPolicy_value = tech_profile.SchedulingPolicy_value
+
+const SchedulingPolicy_WRR = SchedulingPolicy(tech_profile.SchedulingPolicy_WRR)
+const SchedulingPolicy_StrictPriority = SchedulingPolicy(tech_profile.SchedulingPolicy_StrictPriority)
+const SchedulingPolicy_Hybrid = SchedulingPolicy(tech_profile.SchedulingPolicy_Hybrid)
+
+// AdditionalBW from public import voltha_protos/tech_profile.proto
+type AdditionalBW = tech_profile.AdditionalBW
+
+var AdditionalBW_name = tech_profile.AdditionalBW_name
+var AdditionalBW_value = tech_profile.AdditionalBW_value
+
+const AdditionalBW_AdditionalBW_None = AdditionalBW(tech_profile.AdditionalBW_AdditionalBW_None)
+const AdditionalBW_AdditionalBW_NA = AdditionalBW(tech_profile.AdditionalBW_AdditionalBW_NA)
+const AdditionalBW_AdditionalBW_BestEffort = AdditionalBW(tech_profile.AdditionalBW_AdditionalBW_BestEffort)
+const AdditionalBW_AdditionalBW_Auto = AdditionalBW(tech_profile.AdditionalBW_AdditionalBW_Auto)
+
+// DiscardPolicy from public import voltha_protos/tech_profile.proto
+type DiscardPolicy = tech_profile.DiscardPolicy
+
+var DiscardPolicy_name = tech_profile.DiscardPolicy_name
+var DiscardPolicy_value = tech_profile.DiscardPolicy_value
+
+const DiscardPolicy_TailDrop = DiscardPolicy(tech_profile.DiscardPolicy_TailDrop)
+const DiscardPolicy_WTailDrop = DiscardPolicy(tech_profile.DiscardPolicy_WTailDrop)
+const DiscardPolicy_Red = DiscardPolicy(tech_profile.DiscardPolicy_Red)
+const DiscardPolicy_WRed = DiscardPolicy(tech_profile.DiscardPolicy_WRed)
+
+// InferredAdditionBWIndication from public import voltha_protos/tech_profile.proto
+type InferredAdditionBWIndication = tech_profile.InferredAdditionBWIndication
+
+var InferredAdditionBWIndication_name = tech_profile.InferredAdditionBWIndication_name
+var InferredAdditionBWIndication_value = tech_profile.InferredAdditionBWIndication_value
+
+const InferredAdditionBWIndication_InferredAdditionBWIndication_None = InferredAdditionBWIndication(tech_profile.InferredAdditionBWIndication_InferredAdditionBWIndication_None)
+const InferredAdditionBWIndication_InferredAdditionBWIndication_Assured = InferredAdditionBWIndication(tech_profile.InferredAdditionBWIndication_InferredAdditionBWIndication_Assured)
+const InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort = InferredAdditionBWIndication(tech_profile.InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort)
+
+type DeviceInfo_DeviceResourceRanges_Pool_PoolType int32
+
+const (
+	DeviceInfo_DeviceResourceRanges_Pool_ONU_ID     DeviceInfo_DeviceResourceRanges_Pool_PoolType = 0
+	DeviceInfo_DeviceResourceRanges_Pool_ALLOC_ID   DeviceInfo_DeviceResourceRanges_Pool_PoolType = 1
+	DeviceInfo_DeviceResourceRanges_Pool_GEMPORT_ID DeviceInfo_DeviceResourceRanges_Pool_PoolType = 2
+	DeviceInfo_DeviceResourceRanges_Pool_FLOW_ID    DeviceInfo_DeviceResourceRanges_Pool_PoolType = 3
+)
+
+var DeviceInfo_DeviceResourceRanges_Pool_PoolType_name = map[int32]string{
+	0: "ONU_ID",
+	1: "ALLOC_ID",
+	2: "GEMPORT_ID",
+	3: "FLOW_ID",
+}
+
+var DeviceInfo_DeviceResourceRanges_Pool_PoolType_value = map[string]int32{
+	"ONU_ID":     0,
+	"ALLOC_ID":   1,
+	"GEMPORT_ID": 2,
+	"FLOW_ID":    3,
+}
+
+func (x DeviceInfo_DeviceResourceRanges_Pool_PoolType) String() string {
+	return proto.EnumName(DeviceInfo_DeviceResourceRanges_Pool_PoolType_name, int32(x))
+}
+
+func (DeviceInfo_DeviceResourceRanges_Pool_PoolType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{15, 0, 0, 0}
+}
+
+type DeviceInfo_DeviceResourceRanges_Pool_SharingType int32
+
+const (
+	DeviceInfo_DeviceResourceRanges_Pool_DEDICATED_PER_INTF           DeviceInfo_DeviceResourceRanges_Pool_SharingType = 0
+	DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_ALL_TECH  DeviceInfo_DeviceResourceRanges_Pool_SharingType = 1
+	DeviceInfo_DeviceResourceRanges_Pool_SHARED_BY_ALL_INTF_SAME_TECH DeviceInfo_DeviceResourceRanges_Pool_SharingType = 2
+)
+
+var DeviceInfo_DeviceResourceRanges_Pool_SharingType_name = map[int32]string{
+	0: "DEDICATED_PER_INTF",
+	1: "SHARED_BY_ALL_INTF_ALL_TECH",
+	2: "SHARED_BY_ALL_INTF_SAME_TECH",
+}
+
+var DeviceInfo_DeviceResourceRanges_Pool_SharingType_value = map[string]int32{
+	"DEDICATED_PER_INTF":           0,
+	"SHARED_BY_ALL_INTF_ALL_TECH":  1,
+	"SHARED_BY_ALL_INTF_SAME_TECH": 2,
+}
+
+func (x DeviceInfo_DeviceResourceRanges_Pool_SharingType) String() string {
+	return proto.EnumName(DeviceInfo_DeviceResourceRanges_Pool_SharingType_name, int32(x))
+}
+
+func (DeviceInfo_DeviceResourceRanges_Pool_SharingType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{15, 0, 0, 1}
+}
+
+type GroupMember_InterfaceType int32
+
+const (
+	GroupMember_PON           GroupMember_InterfaceType = 0
+	GroupMember_EPON_1G_PATH  GroupMember_InterfaceType = 1
+	GroupMember_EPON_10G_PATH GroupMember_InterfaceType = 2
+)
+
+var GroupMember_InterfaceType_name = map[int32]string{
+	0: "PON",
+	1: "EPON_1G_PATH",
+	2: "EPON_10G_PATH",
+}
+
+var GroupMember_InterfaceType_value = map[string]int32{
+	"PON":           0,
+	"EPON_1G_PATH":  1,
+	"EPON_10G_PATH": 2,
+}
+
+func (x GroupMember_InterfaceType) String() string {
+	return proto.EnumName(GroupMember_InterfaceType_name, int32(x))
+}
+
+func (GroupMember_InterfaceType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{38, 0}
+}
+
+type Group_GroupMembersCommand int32
+
+const (
+	Group_ADD_MEMBERS    Group_GroupMembersCommand = 0
+	Group_REMOVE_MEMBERS Group_GroupMembersCommand = 1
+	Group_SET_MEMBERS    Group_GroupMembersCommand = 2
+)
+
+var Group_GroupMembersCommand_name = map[int32]string{
+	0: "ADD_MEMBERS",
+	1: "REMOVE_MEMBERS",
+	2: "SET_MEMBERS",
+}
+
+var Group_GroupMembersCommand_value = map[string]int32{
+	"ADD_MEMBERS":    0,
+	"REMOVE_MEMBERS": 1,
+	"SET_MEMBERS":    2,
+}
+
+func (x Group_GroupMembersCommand) String() string {
+	return proto.EnumName(Group_GroupMembersCommand_name, int32(x))
+}
+
+func (Group_GroupMembersCommand) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{39, 0}
+}
+
+type Indication struct {
+	// Types that are valid to be assigned to Data:
+	//	*Indication_OltInd
+	//	*Indication_IntfInd
+	//	*Indication_IntfOperInd
+	//	*Indication_OnuDiscInd
+	//	*Indication_OnuInd
+	//	*Indication_OmciInd
+	//	*Indication_PktInd
+	//	*Indication_PortStats
+	//	*Indication_FlowStats
+	//	*Indication_AlarmInd
+	Data                 isIndication_Data `protobuf_oneof:"data"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *Indication) Reset()         { *m = Indication{} }
+func (m *Indication) String() string { return proto.CompactTextString(m) }
+func (*Indication) ProtoMessage()    {}
+func (*Indication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{0}
+}
+
+func (m *Indication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Indication.Unmarshal(m, b)
+}
+func (m *Indication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Indication.Marshal(b, m, deterministic)
+}
+func (m *Indication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Indication.Merge(m, src)
+}
+func (m *Indication) XXX_Size() int {
+	return xxx_messageInfo_Indication.Size(m)
+}
+func (m *Indication) XXX_DiscardUnknown() {
+	xxx_messageInfo_Indication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Indication proto.InternalMessageInfo
+
+type isIndication_Data interface {
+	isIndication_Data()
+}
+
+type Indication_OltInd struct {
+	OltInd *OltIndication `protobuf:"bytes,1,opt,name=olt_ind,json=oltInd,proto3,oneof"`
+}
+
+type Indication_IntfInd struct {
+	IntfInd *IntfIndication `protobuf:"bytes,2,opt,name=intf_ind,json=intfInd,proto3,oneof"`
+}
+
+type Indication_IntfOperInd struct {
+	IntfOperInd *IntfOperIndication `protobuf:"bytes,3,opt,name=intf_oper_ind,json=intfOperInd,proto3,oneof"`
+}
+
+type Indication_OnuDiscInd struct {
+	OnuDiscInd *OnuDiscIndication `protobuf:"bytes,4,opt,name=onu_disc_ind,json=onuDiscInd,proto3,oneof"`
+}
+
+type Indication_OnuInd struct {
+	OnuInd *OnuIndication `protobuf:"bytes,5,opt,name=onu_ind,json=onuInd,proto3,oneof"`
+}
+
+type Indication_OmciInd struct {
+	OmciInd *OmciIndication `protobuf:"bytes,6,opt,name=omci_ind,json=omciInd,proto3,oneof"`
+}
+
+type Indication_PktInd struct {
+	PktInd *PacketIndication `protobuf:"bytes,7,opt,name=pkt_ind,json=pktInd,proto3,oneof"`
+}
+
+type Indication_PortStats struct {
+	PortStats *PortStatistics `protobuf:"bytes,8,opt,name=port_stats,json=portStats,proto3,oneof"`
+}
+
+type Indication_FlowStats struct {
+	FlowStats *FlowStatistics `protobuf:"bytes,9,opt,name=flow_stats,json=flowStats,proto3,oneof"`
+}
+
+type Indication_AlarmInd struct {
+	AlarmInd *AlarmIndication `protobuf:"bytes,10,opt,name=alarm_ind,json=alarmInd,proto3,oneof"`
+}
+
+func (*Indication_OltInd) isIndication_Data() {}
+
+func (*Indication_IntfInd) isIndication_Data() {}
+
+func (*Indication_IntfOperInd) isIndication_Data() {}
+
+func (*Indication_OnuDiscInd) isIndication_Data() {}
+
+func (*Indication_OnuInd) isIndication_Data() {}
+
+func (*Indication_OmciInd) isIndication_Data() {}
+
+func (*Indication_PktInd) isIndication_Data() {}
+
+func (*Indication_PortStats) isIndication_Data() {}
+
+func (*Indication_FlowStats) isIndication_Data() {}
+
+func (*Indication_AlarmInd) isIndication_Data() {}
+
+func (m *Indication) GetData() isIndication_Data {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *Indication) GetOltInd() *OltIndication {
+	if x, ok := m.GetData().(*Indication_OltInd); ok {
+		return x.OltInd
+	}
+	return nil
+}
+
+func (m *Indication) GetIntfInd() *IntfIndication {
+	if x, ok := m.GetData().(*Indication_IntfInd); ok {
+		return x.IntfInd
+	}
+	return nil
+}
+
+func (m *Indication) GetIntfOperInd() *IntfOperIndication {
+	if x, ok := m.GetData().(*Indication_IntfOperInd); ok {
+		return x.IntfOperInd
+	}
+	return nil
+}
+
+func (m *Indication) GetOnuDiscInd() *OnuDiscIndication {
+	if x, ok := m.GetData().(*Indication_OnuDiscInd); ok {
+		return x.OnuDiscInd
+	}
+	return nil
+}
+
+func (m *Indication) GetOnuInd() *OnuIndication {
+	if x, ok := m.GetData().(*Indication_OnuInd); ok {
+		return x.OnuInd
+	}
+	return nil
+}
+
+func (m *Indication) GetOmciInd() *OmciIndication {
+	if x, ok := m.GetData().(*Indication_OmciInd); ok {
+		return x.OmciInd
+	}
+	return nil
+}
+
+func (m *Indication) GetPktInd() *PacketIndication {
+	if x, ok := m.GetData().(*Indication_PktInd); ok {
+		return x.PktInd
+	}
+	return nil
+}
+
+func (m *Indication) GetPortStats() *PortStatistics {
+	if x, ok := m.GetData().(*Indication_PortStats); ok {
+		return x.PortStats
+	}
+	return nil
+}
+
+func (m *Indication) GetFlowStats() *FlowStatistics {
+	if x, ok := m.GetData().(*Indication_FlowStats); ok {
+		return x.FlowStats
+	}
+	return nil
+}
+
+func (m *Indication) GetAlarmInd() *AlarmIndication {
+	if x, ok := m.GetData().(*Indication_AlarmInd); ok {
+		return x.AlarmInd
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Indication) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Indication_OltInd)(nil),
+		(*Indication_IntfInd)(nil),
+		(*Indication_IntfOperInd)(nil),
+		(*Indication_OnuDiscInd)(nil),
+		(*Indication_OnuInd)(nil),
+		(*Indication_OmciInd)(nil),
+		(*Indication_PktInd)(nil),
+		(*Indication_PortStats)(nil),
+		(*Indication_FlowStats)(nil),
+		(*Indication_AlarmInd)(nil),
+	}
+}
+
+type AlarmIndication struct {
+	// Types that are valid to be assigned to Data:
+	//	*AlarmIndication_LosInd
+	//	*AlarmIndication_DyingGaspInd
+	//	*AlarmIndication_OnuAlarmInd
+	//	*AlarmIndication_OnuStartupFailInd
+	//	*AlarmIndication_OnuSignalDegradeInd
+	//	*AlarmIndication_OnuDriftOfWindowInd
+	//	*AlarmIndication_OnuLossOmciInd
+	//	*AlarmIndication_OnuSignalsFailInd
+	//	*AlarmIndication_OnuTiwiInd
+	//	*AlarmIndication_OnuActivationFailInd
+	//	*AlarmIndication_OnuProcessingErrorInd
+	//	*AlarmIndication_OnuLossOfSyncFailInd
+	//	*AlarmIndication_OnuItuPonStatsInd
+	//	*AlarmIndication_OnuDeactivationFailureInd
+	//	*AlarmIndication_OnuRemoteDefectInd
+	Data                 isAlarmIndication_Data `protobuf_oneof:"data"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *AlarmIndication) Reset()         { *m = AlarmIndication{} }
+func (m *AlarmIndication) String() string { return proto.CompactTextString(m) }
+func (*AlarmIndication) ProtoMessage()    {}
+func (*AlarmIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{1}
+}
+
+func (m *AlarmIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmIndication.Unmarshal(m, b)
+}
+func (m *AlarmIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmIndication.Marshal(b, m, deterministic)
+}
+func (m *AlarmIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmIndication.Merge(m, src)
+}
+func (m *AlarmIndication) XXX_Size() int {
+	return xxx_messageInfo_AlarmIndication.Size(m)
+}
+func (m *AlarmIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmIndication proto.InternalMessageInfo
+
+type isAlarmIndication_Data interface {
+	isAlarmIndication_Data()
+}
+
+type AlarmIndication_LosInd struct {
+	LosInd *LosIndication `protobuf:"bytes,1,opt,name=los_ind,json=losInd,proto3,oneof"`
+}
+
+type AlarmIndication_DyingGaspInd struct {
+	DyingGaspInd *DyingGaspIndication `protobuf:"bytes,2,opt,name=dying_gasp_ind,json=dyingGaspInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuAlarmInd struct {
+	OnuAlarmInd *OnuAlarmIndication `protobuf:"bytes,3,opt,name=onu_alarm_ind,json=onuAlarmInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuStartupFailInd struct {
+	OnuStartupFailInd *OnuStartupFailureIndication `protobuf:"bytes,4,opt,name=onu_startup_fail_ind,json=onuStartupFailInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuSignalDegradeInd struct {
+	OnuSignalDegradeInd *OnuSignalDegradeIndication `protobuf:"bytes,5,opt,name=onu_signal_degrade_ind,json=onuSignalDegradeInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuDriftOfWindowInd struct {
+	OnuDriftOfWindowInd *OnuDriftOfWindowIndication `protobuf:"bytes,6,opt,name=onu_drift_of_window_ind,json=onuDriftOfWindowInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuLossOmciInd struct {
+	OnuLossOmciInd *OnuLossOfOmciChannelIndication `protobuf:"bytes,7,opt,name=onu_loss_omci_ind,json=onuLossOmciInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuSignalsFailInd struct {
+	OnuSignalsFailInd *OnuSignalsFailureIndication `protobuf:"bytes,8,opt,name=onu_signals_fail_ind,json=onuSignalsFailInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuTiwiInd struct {
+	OnuTiwiInd *OnuTransmissionInterferenceWarning `protobuf:"bytes,9,opt,name=onu_tiwi_ind,json=onuTiwiInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuActivationFailInd struct {
+	OnuActivationFailInd *OnuActivationFailureIndication `protobuf:"bytes,10,opt,name=onu_activation_fail_ind,json=onuActivationFailInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuProcessingErrorInd struct {
+	OnuProcessingErrorInd *OnuProcessingErrorIndication `protobuf:"bytes,11,opt,name=onu_processing_error_ind,json=onuProcessingErrorInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuLossOfSyncFailInd struct {
+	OnuLossOfSyncFailInd *OnuLossOfKeySyncFailureIndication `protobuf:"bytes,12,opt,name=onu_loss_of_sync_fail_ind,json=onuLossOfSyncFailInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuItuPonStatsInd struct {
+	OnuItuPonStatsInd *OnuItuPonStatsIndication `protobuf:"bytes,13,opt,name=onu_itu_pon_stats_ind,json=onuItuPonStatsInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuDeactivationFailureInd struct {
+	OnuDeactivationFailureInd *OnuDeactivationFailureIndication `protobuf:"bytes,14,opt,name=onu_deactivation_failure_ind,json=onuDeactivationFailureInd,proto3,oneof"`
+}
+
+type AlarmIndication_OnuRemoteDefectInd struct {
+	OnuRemoteDefectInd *OnuRemoteDefectIndication `protobuf:"bytes,15,opt,name=onu_remote_defect_ind,json=onuRemoteDefectInd,proto3,oneof"`
+}
+
+func (*AlarmIndication_LosInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_DyingGaspInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuAlarmInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuStartupFailInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuSignalDegradeInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuDriftOfWindowInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuLossOmciInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuSignalsFailInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuTiwiInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuActivationFailInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuProcessingErrorInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuLossOfSyncFailInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuItuPonStatsInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuDeactivationFailureInd) isAlarmIndication_Data() {}
+
+func (*AlarmIndication_OnuRemoteDefectInd) isAlarmIndication_Data() {}
+
+func (m *AlarmIndication) GetData() isAlarmIndication_Data {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetLosInd() *LosIndication {
+	if x, ok := m.GetData().(*AlarmIndication_LosInd); ok {
+		return x.LosInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetDyingGaspInd() *DyingGaspIndication {
+	if x, ok := m.GetData().(*AlarmIndication_DyingGaspInd); ok {
+		return x.DyingGaspInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuAlarmInd() *OnuAlarmIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuAlarmInd); ok {
+		return x.OnuAlarmInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuStartupFailInd() *OnuStartupFailureIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuStartupFailInd); ok {
+		return x.OnuStartupFailInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuSignalDegradeInd() *OnuSignalDegradeIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuSignalDegradeInd); ok {
+		return x.OnuSignalDegradeInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuDriftOfWindowInd() *OnuDriftOfWindowIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuDriftOfWindowInd); ok {
+		return x.OnuDriftOfWindowInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuLossOmciInd() *OnuLossOfOmciChannelIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuLossOmciInd); ok {
+		return x.OnuLossOmciInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuSignalsFailInd() *OnuSignalsFailureIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuSignalsFailInd); ok {
+		return x.OnuSignalsFailInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuTiwiInd() *OnuTransmissionInterferenceWarning {
+	if x, ok := m.GetData().(*AlarmIndication_OnuTiwiInd); ok {
+		return x.OnuTiwiInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuActivationFailInd() *OnuActivationFailureIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuActivationFailInd); ok {
+		return x.OnuActivationFailInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuProcessingErrorInd() *OnuProcessingErrorIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuProcessingErrorInd); ok {
+		return x.OnuProcessingErrorInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuLossOfSyncFailInd() *OnuLossOfKeySyncFailureIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuLossOfSyncFailInd); ok {
+		return x.OnuLossOfSyncFailInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuItuPonStatsInd() *OnuItuPonStatsIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuItuPonStatsInd); ok {
+		return x.OnuItuPonStatsInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuDeactivationFailureInd() *OnuDeactivationFailureIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuDeactivationFailureInd); ok {
+		return x.OnuDeactivationFailureInd
+	}
+	return nil
+}
+
+func (m *AlarmIndication) GetOnuRemoteDefectInd() *OnuRemoteDefectIndication {
+	if x, ok := m.GetData().(*AlarmIndication_OnuRemoteDefectInd); ok {
+		return x.OnuRemoteDefectInd
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*AlarmIndication) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*AlarmIndication_LosInd)(nil),
+		(*AlarmIndication_DyingGaspInd)(nil),
+		(*AlarmIndication_OnuAlarmInd)(nil),
+		(*AlarmIndication_OnuStartupFailInd)(nil),
+		(*AlarmIndication_OnuSignalDegradeInd)(nil),
+		(*AlarmIndication_OnuDriftOfWindowInd)(nil),
+		(*AlarmIndication_OnuLossOmciInd)(nil),
+		(*AlarmIndication_OnuSignalsFailInd)(nil),
+		(*AlarmIndication_OnuTiwiInd)(nil),
+		(*AlarmIndication_OnuActivationFailInd)(nil),
+		(*AlarmIndication_OnuProcessingErrorInd)(nil),
+		(*AlarmIndication_OnuLossOfSyncFailInd)(nil),
+		(*AlarmIndication_OnuItuPonStatsInd)(nil),
+		(*AlarmIndication_OnuDeactivationFailureInd)(nil),
+		(*AlarmIndication_OnuRemoteDefectInd)(nil),
+	}
+}
+
+type OltIndication struct {
+	OperState            string   `protobuf:"bytes,1,opt,name=oper_state,json=operState,proto3" json:"oper_state,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OltIndication) Reset()         { *m = OltIndication{} }
+func (m *OltIndication) String() string { return proto.CompactTextString(m) }
+func (*OltIndication) ProtoMessage()    {}
+func (*OltIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{2}
+}
+
+func (m *OltIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OltIndication.Unmarshal(m, b)
+}
+func (m *OltIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OltIndication.Marshal(b, m, deterministic)
+}
+func (m *OltIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OltIndication.Merge(m, src)
+}
+func (m *OltIndication) XXX_Size() int {
+	return xxx_messageInfo_OltIndication.Size(m)
+}
+func (m *OltIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OltIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OltIndication proto.InternalMessageInfo
+
+func (m *OltIndication) GetOperState() string {
+	if m != nil {
+		return m.OperState
+	}
+	return ""
+}
+
+type IntfIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OperState            string   `protobuf:"bytes,2,opt,name=oper_state,json=operState,proto3" json:"oper_state,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *IntfIndication) Reset()         { *m = IntfIndication{} }
+func (m *IntfIndication) String() string { return proto.CompactTextString(m) }
+func (*IntfIndication) ProtoMessage()    {}
+func (*IntfIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{3}
+}
+
+func (m *IntfIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_IntfIndication.Unmarshal(m, b)
+}
+func (m *IntfIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_IntfIndication.Marshal(b, m, deterministic)
+}
+func (m *IntfIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IntfIndication.Merge(m, src)
+}
+func (m *IntfIndication) XXX_Size() int {
+	return xxx_messageInfo_IntfIndication.Size(m)
+}
+func (m *IntfIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_IntfIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IntfIndication proto.InternalMessageInfo
+
+func (m *IntfIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *IntfIndication) GetOperState() string {
+	if m != nil {
+		return m.OperState
+	}
+	return ""
+}
+
+type OnuDiscIndication struct {
+	IntfId               uint32        `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	SerialNumber         *SerialNumber `protobuf:"bytes,2,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *OnuDiscIndication) Reset()         { *m = OnuDiscIndication{} }
+func (m *OnuDiscIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuDiscIndication) ProtoMessage()    {}
+func (*OnuDiscIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{4}
+}
+
+func (m *OnuDiscIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuDiscIndication.Unmarshal(m, b)
+}
+func (m *OnuDiscIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuDiscIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuDiscIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuDiscIndication.Merge(m, src)
+}
+func (m *OnuDiscIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuDiscIndication.Size(m)
+}
+func (m *OnuDiscIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuDiscIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuDiscIndication proto.InternalMessageInfo
+
+func (m *OnuDiscIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuDiscIndication) GetSerialNumber() *SerialNumber {
+	if m != nil {
+		return m.SerialNumber
+	}
+	return nil
+}
+
+type OnuIndication struct {
+	IntfId               uint32        `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32        `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	OperState            string        `protobuf:"bytes,3,opt,name=oper_state,json=operState,proto3" json:"oper_state,omitempty"`
+	AdminState           string        `protobuf:"bytes,5,opt,name=admin_state,json=adminState,proto3" json:"admin_state,omitempty"`
+	SerialNumber         *SerialNumber `protobuf:"bytes,4,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *OnuIndication) Reset()         { *m = OnuIndication{} }
+func (m *OnuIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuIndication) ProtoMessage()    {}
+func (*OnuIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{5}
+}
+
+func (m *OnuIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuIndication.Unmarshal(m, b)
+}
+func (m *OnuIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuIndication.Merge(m, src)
+}
+func (m *OnuIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuIndication.Size(m)
+}
+func (m *OnuIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuIndication proto.InternalMessageInfo
+
+func (m *OnuIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuIndication) GetOperState() string {
+	if m != nil {
+		return m.OperState
+	}
+	return ""
+}
+
+func (m *OnuIndication) GetAdminState() string {
+	if m != nil {
+		return m.AdminState
+	}
+	return ""
+}
+
+func (m *OnuIndication) GetSerialNumber() *SerialNumber {
+	if m != nil {
+		return m.SerialNumber
+	}
+	return nil
+}
+
+type IntfOperIndication struct {
+	Type                 string   `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	IntfId               uint32   `protobuf:"fixed32,2,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OperState            string   `protobuf:"bytes,3,opt,name=oper_state,json=operState,proto3" json:"oper_state,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *IntfOperIndication) Reset()         { *m = IntfOperIndication{} }
+func (m *IntfOperIndication) String() string { return proto.CompactTextString(m) }
+func (*IntfOperIndication) ProtoMessage()    {}
+func (*IntfOperIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{6}
+}
+
+func (m *IntfOperIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_IntfOperIndication.Unmarshal(m, b)
+}
+func (m *IntfOperIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_IntfOperIndication.Marshal(b, m, deterministic)
+}
+func (m *IntfOperIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IntfOperIndication.Merge(m, src)
+}
+func (m *IntfOperIndication) XXX_Size() int {
+	return xxx_messageInfo_IntfOperIndication.Size(m)
+}
+func (m *IntfOperIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_IntfOperIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IntfOperIndication proto.InternalMessageInfo
+
+func (m *IntfOperIndication) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *IntfOperIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *IntfOperIndication) GetOperState() string {
+	if m != nil {
+		return m.OperState
+	}
+	return ""
+}
+
+type OmciIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Pkt                  []byte   `protobuf:"bytes,3,opt,name=pkt,proto3" json:"pkt,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OmciIndication) Reset()         { *m = OmciIndication{} }
+func (m *OmciIndication) String() string { return proto.CompactTextString(m) }
+func (*OmciIndication) ProtoMessage()    {}
+func (*OmciIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{7}
+}
+
+func (m *OmciIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OmciIndication.Unmarshal(m, b)
+}
+func (m *OmciIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OmciIndication.Marshal(b, m, deterministic)
+}
+func (m *OmciIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OmciIndication.Merge(m, src)
+}
+func (m *OmciIndication) XXX_Size() int {
+	return xxx_messageInfo_OmciIndication.Size(m)
+}
+func (m *OmciIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OmciIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OmciIndication proto.InternalMessageInfo
+
+func (m *OmciIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OmciIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OmciIndication) GetPkt() []byte {
+	if m != nil {
+		return m.Pkt
+	}
+	return nil
+}
+
+type PacketIndication struct {
+	IntfType             string   `protobuf:"bytes,5,opt,name=intf_type,json=intfType,proto3" json:"intf_type,omitempty"`
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	GemportId            uint32   `protobuf:"fixed32,2,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
+	FlowId               uint32   `protobuf:"fixed32,3,opt,name=flow_id,json=flowId,proto3" json:"flow_id,omitempty"`
+	PortNo               uint32   `protobuf:"fixed32,6,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	Cookie               uint64   `protobuf:"fixed64,7,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	Pkt                  []byte   `protobuf:"bytes,4,opt,name=pkt,proto3" json:"pkt,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PacketIndication) Reset()         { *m = PacketIndication{} }
+func (m *PacketIndication) String() string { return proto.CompactTextString(m) }
+func (*PacketIndication) ProtoMessage()    {}
+func (*PacketIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{8}
+}
+
+func (m *PacketIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PacketIndication.Unmarshal(m, b)
+}
+func (m *PacketIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PacketIndication.Marshal(b, m, deterministic)
+}
+func (m *PacketIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PacketIndication.Merge(m, src)
+}
+func (m *PacketIndication) XXX_Size() int {
+	return xxx_messageInfo_PacketIndication.Size(m)
+}
+func (m *PacketIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_PacketIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketIndication proto.InternalMessageInfo
+
+func (m *PacketIndication) GetIntfType() string {
+	if m != nil {
+		return m.IntfType
+	}
+	return ""
+}
+
+func (m *PacketIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *PacketIndication) GetGemportId() uint32 {
+	if m != nil {
+		return m.GemportId
+	}
+	return 0
+}
+
+func (m *PacketIndication) GetFlowId() uint32 {
+	if m != nil {
+		return m.FlowId
+	}
+	return 0
+}
+
+func (m *PacketIndication) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *PacketIndication) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *PacketIndication) GetPkt() []byte {
+	if m != nil {
+		return m.Pkt
+	}
+	return nil
+}
+
+type Interface struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Interface) Reset()         { *m = Interface{} }
+func (m *Interface) String() string { return proto.CompactTextString(m) }
+func (*Interface) ProtoMessage()    {}
+func (*Interface) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{9}
+}
+
+func (m *Interface) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Interface.Unmarshal(m, b)
+}
+func (m *Interface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Interface.Marshal(b, m, deterministic)
+}
+func (m *Interface) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Interface.Merge(m, src)
+}
+func (m *Interface) XXX_Size() int {
+	return xxx_messageInfo_Interface.Size(m)
+}
+func (m *Interface) XXX_DiscardUnknown() {
+	xxx_messageInfo_Interface.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Interface proto.InternalMessageInfo
+
+func (m *Interface) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+type Heartbeat struct {
+	HeartbeatSignature   uint32   `protobuf:"fixed32,1,opt,name=heartbeat_signature,json=heartbeatSignature,proto3" json:"heartbeat_signature,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Heartbeat) Reset()         { *m = Heartbeat{} }
+func (m *Heartbeat) String() string { return proto.CompactTextString(m) }
+func (*Heartbeat) ProtoMessage()    {}
+func (*Heartbeat) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{10}
+}
+
+func (m *Heartbeat) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Heartbeat.Unmarshal(m, b)
+}
+func (m *Heartbeat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Heartbeat.Marshal(b, m, deterministic)
+}
+func (m *Heartbeat) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Heartbeat.Merge(m, src)
+}
+func (m *Heartbeat) XXX_Size() int {
+	return xxx_messageInfo_Heartbeat.Size(m)
+}
+func (m *Heartbeat) XXX_DiscardUnknown() {
+	xxx_messageInfo_Heartbeat.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Heartbeat proto.InternalMessageInfo
+
+func (m *Heartbeat) GetHeartbeatSignature() uint32 {
+	if m != nil {
+		return m.HeartbeatSignature
+	}
+	return 0
+}
+
+type Onu struct {
+	IntfId               uint32        `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32        `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	SerialNumber         *SerialNumber `protobuf:"bytes,3,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+	Pir                  uint32        `protobuf:"fixed32,4,opt,name=pir,proto3" json:"pir,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *Onu) Reset()         { *m = Onu{} }
+func (m *Onu) String() string { return proto.CompactTextString(m) }
+func (*Onu) ProtoMessage()    {}
+func (*Onu) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{11}
+}
+
+func (m *Onu) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Onu.Unmarshal(m, b)
+}
+func (m *Onu) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Onu.Marshal(b, m, deterministic)
+}
+func (m *Onu) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Onu.Merge(m, src)
+}
+func (m *Onu) XXX_Size() int {
+	return xxx_messageInfo_Onu.Size(m)
+}
+func (m *Onu) XXX_DiscardUnknown() {
+	xxx_messageInfo_Onu.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Onu proto.InternalMessageInfo
+
+func (m *Onu) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *Onu) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *Onu) GetSerialNumber() *SerialNumber {
+	if m != nil {
+		return m.SerialNumber
+	}
+	return nil
+}
+
+func (m *Onu) GetPir() uint32 {
+	if m != nil {
+		return m.Pir
+	}
+	return 0
+}
+
+type OmciMsg struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Pkt                  []byte   `protobuf:"bytes,3,opt,name=pkt,proto3" json:"pkt,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OmciMsg) Reset()         { *m = OmciMsg{} }
+func (m *OmciMsg) String() string { return proto.CompactTextString(m) }
+func (*OmciMsg) ProtoMessage()    {}
+func (*OmciMsg) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{12}
+}
+
+func (m *OmciMsg) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OmciMsg.Unmarshal(m, b)
+}
+func (m *OmciMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OmciMsg.Marshal(b, m, deterministic)
+}
+func (m *OmciMsg) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OmciMsg.Merge(m, src)
+}
+func (m *OmciMsg) XXX_Size() int {
+	return xxx_messageInfo_OmciMsg.Size(m)
+}
+func (m *OmciMsg) XXX_DiscardUnknown() {
+	xxx_messageInfo_OmciMsg.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OmciMsg proto.InternalMessageInfo
+
+func (m *OmciMsg) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OmciMsg) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OmciMsg) GetPkt() []byte {
+	if m != nil {
+		return m.Pkt
+	}
+	return nil
+}
+
+type OnuPacket struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	PortNo               uint32   `protobuf:"fixed32,4,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	GemportId            uint32   `protobuf:"fixed32,5,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
+	Pkt                  []byte   `protobuf:"bytes,3,opt,name=pkt,proto3" json:"pkt,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuPacket) Reset()         { *m = OnuPacket{} }
+func (m *OnuPacket) String() string { return proto.CompactTextString(m) }
+func (*OnuPacket) ProtoMessage()    {}
+func (*OnuPacket) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{13}
+}
+
+func (m *OnuPacket) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuPacket.Unmarshal(m, b)
+}
+func (m *OnuPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuPacket.Marshal(b, m, deterministic)
+}
+func (m *OnuPacket) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuPacket.Merge(m, src)
+}
+func (m *OnuPacket) XXX_Size() int {
+	return xxx_messageInfo_OnuPacket.Size(m)
+}
+func (m *OnuPacket) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuPacket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuPacket proto.InternalMessageInfo
+
+func (m *OnuPacket) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuPacket) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuPacket) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *OnuPacket) GetGemportId() uint32 {
+	if m != nil {
+		return m.GemportId
+	}
+	return 0
+}
+
+func (m *OnuPacket) GetPkt() []byte {
+	if m != nil {
+		return m.Pkt
+	}
+	return nil
+}
+
+type UplinkPacket struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	Pkt                  []byte   `protobuf:"bytes,2,opt,name=pkt,proto3" json:"pkt,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UplinkPacket) Reset()         { *m = UplinkPacket{} }
+func (m *UplinkPacket) String() string { return proto.CompactTextString(m) }
+func (*UplinkPacket) ProtoMessage()    {}
+func (*UplinkPacket) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{14}
+}
+
+func (m *UplinkPacket) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UplinkPacket.Unmarshal(m, b)
+}
+func (m *UplinkPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UplinkPacket.Marshal(b, m, deterministic)
+}
+func (m *UplinkPacket) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UplinkPacket.Merge(m, src)
+}
+func (m *UplinkPacket) XXX_Size() int {
+	return xxx_messageInfo_UplinkPacket.Size(m)
+}
+func (m *UplinkPacket) XXX_DiscardUnknown() {
+	xxx_messageInfo_UplinkPacket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UplinkPacket proto.InternalMessageInfo
+
+func (m *UplinkPacket) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *UplinkPacket) GetPkt() []byte {
+	if m != nil {
+		return m.Pkt
+	}
+	return nil
+}
+
+type DeviceInfo struct {
+	Vendor             string `protobuf:"bytes,1,opt,name=vendor,proto3" json:"vendor,omitempty"`
+	Model              string `protobuf:"bytes,2,opt,name=model,proto3" json:"model,omitempty"`
+	HardwareVersion    string `protobuf:"bytes,3,opt,name=hardware_version,json=hardwareVersion,proto3" json:"hardware_version,omitempty"`
+	FirmwareVersion    string `protobuf:"bytes,4,opt,name=firmware_version,json=firmwareVersion,proto3" json:"firmware_version,omitempty"`
+	DeviceId           string `protobuf:"bytes,16,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	DeviceSerialNumber string `protobuf:"bytes,17,opt,name=device_serial_number,json=deviceSerialNumber,proto3" json:"device_serial_number,omitempty"`
+	// Total number of pon intf ports on the device
+	PonPorts uint32 `protobuf:"fixed32,12,opt,name=pon_ports,json=ponPorts,proto3" json:"pon_ports,omitempty"`
+	// If using global per-device technology profile. To be deprecated
+	Technology           string                             `protobuf:"bytes,5,opt,name=technology,proto3" json:"technology,omitempty"`
+	OnuIdStart           uint32                             `protobuf:"fixed32,6,opt,name=onu_id_start,json=onuIdStart,proto3" json:"onu_id_start,omitempty"`
+	OnuIdEnd             uint32                             `protobuf:"fixed32,7,opt,name=onu_id_end,json=onuIdEnd,proto3" json:"onu_id_end,omitempty"`
+	AllocIdStart         uint32                             `protobuf:"fixed32,8,opt,name=alloc_id_start,json=allocIdStart,proto3" json:"alloc_id_start,omitempty"`
+	AllocIdEnd           uint32                             `protobuf:"fixed32,9,opt,name=alloc_id_end,json=allocIdEnd,proto3" json:"alloc_id_end,omitempty"`
+	GemportIdStart       uint32                             `protobuf:"fixed32,10,opt,name=gemport_id_start,json=gemportIdStart,proto3" json:"gemport_id_start,omitempty"`
+	GemportIdEnd         uint32                             `protobuf:"fixed32,11,opt,name=gemport_id_end,json=gemportIdEnd,proto3" json:"gemport_id_end,omitempty"`
+	FlowIdStart          uint32                             `protobuf:"fixed32,13,opt,name=flow_id_start,json=flowIdStart,proto3" json:"flow_id_start,omitempty"`
+	FlowIdEnd            uint32                             `protobuf:"fixed32,14,opt,name=flow_id_end,json=flowIdEnd,proto3" json:"flow_id_end,omitempty"`
+	Ranges               []*DeviceInfo_DeviceResourceRanges `protobuf:"bytes,15,rep,name=ranges,proto3" json:"ranges,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                           `json:"-"`
+	XXX_unrecognized     []byte                             `json:"-"`
+	XXX_sizecache        int32                              `json:"-"`
+}
+
+func (m *DeviceInfo) Reset()         { *m = DeviceInfo{} }
+func (m *DeviceInfo) String() string { return proto.CompactTextString(m) }
+func (*DeviceInfo) ProtoMessage()    {}
+func (*DeviceInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{15}
+}
+
+func (m *DeviceInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceInfo.Unmarshal(m, b)
+}
+func (m *DeviceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceInfo.Marshal(b, m, deterministic)
+}
+func (m *DeviceInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceInfo.Merge(m, src)
+}
+func (m *DeviceInfo) XXX_Size() int {
+	return xxx_messageInfo_DeviceInfo.Size(m)
+}
+func (m *DeviceInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceInfo proto.InternalMessageInfo
+
+func (m *DeviceInfo) GetVendor() string {
+	if m != nil {
+		return m.Vendor
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetModel() string {
+	if m != nil {
+		return m.Model
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetHardwareVersion() string {
+	if m != nil {
+		return m.HardwareVersion
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetFirmwareVersion() string {
+	if m != nil {
+		return m.FirmwareVersion
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetDeviceSerialNumber() string {
+	if m != nil {
+		return m.DeviceSerialNumber
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetPonPorts() uint32 {
+	if m != nil {
+		return m.PonPorts
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetTechnology() string {
+	if m != nil {
+		return m.Technology
+	}
+	return ""
+}
+
+func (m *DeviceInfo) GetOnuIdStart() uint32 {
+	if m != nil {
+		return m.OnuIdStart
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetOnuIdEnd() uint32 {
+	if m != nil {
+		return m.OnuIdEnd
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetAllocIdStart() uint32 {
+	if m != nil {
+		return m.AllocIdStart
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetAllocIdEnd() uint32 {
+	if m != nil {
+		return m.AllocIdEnd
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetGemportIdStart() uint32 {
+	if m != nil {
+		return m.GemportIdStart
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetGemportIdEnd() uint32 {
+	if m != nil {
+		return m.GemportIdEnd
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetFlowIdStart() uint32 {
+	if m != nil {
+		return m.FlowIdStart
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetFlowIdEnd() uint32 {
+	if m != nil {
+		return m.FlowIdEnd
+	}
+	return 0
+}
+
+func (m *DeviceInfo) GetRanges() []*DeviceInfo_DeviceResourceRanges {
+	if m != nil {
+		return m.Ranges
+	}
+	return nil
+}
+
+type DeviceInfo_DeviceResourceRanges struct {
+	// List of 0 or more intf_ids that use the same technology and pools.
+	// If 0 intf_ids supplied, it implies ALL interfaces
+	IntfIds []uint32 `protobuf:"fixed32,1,rep,packed,name=intf_ids,json=intfIds,proto3" json:"intf_ids,omitempty"`
+	// Technology profile for this pool
+	Technology           string                                  `protobuf:"bytes,2,opt,name=technology,proto3" json:"technology,omitempty"`
+	Pools                []*DeviceInfo_DeviceResourceRanges_Pool `protobuf:"bytes,3,rep,name=pools,proto3" json:"pools,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                                `json:"-"`
+	XXX_unrecognized     []byte                                  `json:"-"`
+	XXX_sizecache        int32                                   `json:"-"`
+}
+
+func (m *DeviceInfo_DeviceResourceRanges) Reset()         { *m = DeviceInfo_DeviceResourceRanges{} }
+func (m *DeviceInfo_DeviceResourceRanges) String() string { return proto.CompactTextString(m) }
+func (*DeviceInfo_DeviceResourceRanges) ProtoMessage()    {}
+func (*DeviceInfo_DeviceResourceRanges) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{15, 0}
+}
+
+func (m *DeviceInfo_DeviceResourceRanges) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceInfo_DeviceResourceRanges.Unmarshal(m, b)
+}
+func (m *DeviceInfo_DeviceResourceRanges) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceInfo_DeviceResourceRanges.Marshal(b, m, deterministic)
+}
+func (m *DeviceInfo_DeviceResourceRanges) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceInfo_DeviceResourceRanges.Merge(m, src)
+}
+func (m *DeviceInfo_DeviceResourceRanges) XXX_Size() int {
+	return xxx_messageInfo_DeviceInfo_DeviceResourceRanges.Size(m)
+}
+func (m *DeviceInfo_DeviceResourceRanges) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceInfo_DeviceResourceRanges.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceInfo_DeviceResourceRanges proto.InternalMessageInfo
+
+func (m *DeviceInfo_DeviceResourceRanges) GetIntfIds() []uint32 {
+	if m != nil {
+		return m.IntfIds
+	}
+	return nil
+}
+
+func (m *DeviceInfo_DeviceResourceRanges) GetTechnology() string {
+	if m != nil {
+		return m.Technology
+	}
+	return ""
+}
+
+func (m *DeviceInfo_DeviceResourceRanges) GetPools() []*DeviceInfo_DeviceResourceRanges_Pool {
+	if m != nil {
+		return m.Pools
+	}
+	return nil
+}
+
+type DeviceInfo_DeviceResourceRanges_Pool struct {
+	Type                 DeviceInfo_DeviceResourceRanges_Pool_PoolType    `protobuf:"varint,1,opt,name=type,proto3,enum=openolt.DeviceInfo_DeviceResourceRanges_Pool_PoolType" json:"type,omitempty"`
+	Sharing              DeviceInfo_DeviceResourceRanges_Pool_SharingType `protobuf:"varint,2,opt,name=sharing,proto3,enum=openolt.DeviceInfo_DeviceResourceRanges_Pool_SharingType" json:"sharing,omitempty"`
+	Start                uint32                                           `protobuf:"fixed32,3,opt,name=start,proto3" json:"start,omitempty"`
+	End                  uint32                                           `protobuf:"fixed32,4,opt,name=end,proto3" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                                         `json:"-"`
+	XXX_unrecognized     []byte                                           `json:"-"`
+	XXX_sizecache        int32                                            `json:"-"`
+}
+
+func (m *DeviceInfo_DeviceResourceRanges_Pool) Reset()         { *m = DeviceInfo_DeviceResourceRanges_Pool{} }
+func (m *DeviceInfo_DeviceResourceRanges_Pool) String() string { return proto.CompactTextString(m) }
+func (*DeviceInfo_DeviceResourceRanges_Pool) ProtoMessage()    {}
+func (*DeviceInfo_DeviceResourceRanges_Pool) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{15, 0, 0}
+}
+
+func (m *DeviceInfo_DeviceResourceRanges_Pool) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceInfo_DeviceResourceRanges_Pool.Unmarshal(m, b)
+}
+func (m *DeviceInfo_DeviceResourceRanges_Pool) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceInfo_DeviceResourceRanges_Pool.Marshal(b, m, deterministic)
+}
+func (m *DeviceInfo_DeviceResourceRanges_Pool) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceInfo_DeviceResourceRanges_Pool.Merge(m, src)
+}
+func (m *DeviceInfo_DeviceResourceRanges_Pool) XXX_Size() int {
+	return xxx_messageInfo_DeviceInfo_DeviceResourceRanges_Pool.Size(m)
+}
+func (m *DeviceInfo_DeviceResourceRanges_Pool) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceInfo_DeviceResourceRanges_Pool.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceInfo_DeviceResourceRanges_Pool proto.InternalMessageInfo
+
+func (m *DeviceInfo_DeviceResourceRanges_Pool) GetType() DeviceInfo_DeviceResourceRanges_Pool_PoolType {
+	if m != nil {
+		return m.Type
+	}
+	return DeviceInfo_DeviceResourceRanges_Pool_ONU_ID
+}
+
+func (m *DeviceInfo_DeviceResourceRanges_Pool) GetSharing() DeviceInfo_DeviceResourceRanges_Pool_SharingType {
+	if m != nil {
+		return m.Sharing
+	}
+	return DeviceInfo_DeviceResourceRanges_Pool_DEDICATED_PER_INTF
+}
+
+func (m *DeviceInfo_DeviceResourceRanges_Pool) GetStart() uint32 {
+	if m != nil {
+		return m.Start
+	}
+	return 0
+}
+
+func (m *DeviceInfo_DeviceResourceRanges_Pool) GetEnd() uint32 {
+	if m != nil {
+		return m.End
+	}
+	return 0
+}
+
+type Classifier struct {
+	OTpid                uint32   `protobuf:"fixed32,1,opt,name=o_tpid,json=oTpid,proto3" json:"o_tpid,omitempty"`
+	OVid                 uint32   `protobuf:"fixed32,2,opt,name=o_vid,json=oVid,proto3" json:"o_vid,omitempty"`
+	ITpid                uint32   `protobuf:"fixed32,3,opt,name=i_tpid,json=iTpid,proto3" json:"i_tpid,omitempty"`
+	IVid                 uint32   `protobuf:"fixed32,4,opt,name=i_vid,json=iVid,proto3" json:"i_vid,omitempty"`
+	OPbits               uint32   `protobuf:"fixed32,5,opt,name=o_pbits,json=oPbits,proto3" json:"o_pbits,omitempty"`
+	IPbits               uint32   `protobuf:"fixed32,6,opt,name=i_pbits,json=iPbits,proto3" json:"i_pbits,omitempty"`
+	EthType              uint32   `protobuf:"fixed32,7,opt,name=eth_type,json=ethType,proto3" json:"eth_type,omitempty"`
+	DstMac               []byte   `protobuf:"bytes,8,opt,name=dst_mac,json=dstMac,proto3" json:"dst_mac,omitempty"`
+	SrcMac               []byte   `protobuf:"bytes,9,opt,name=src_mac,json=srcMac,proto3" json:"src_mac,omitempty"`
+	IpProto              uint32   `protobuf:"fixed32,10,opt,name=ip_proto,json=ipProto,proto3" json:"ip_proto,omitempty"`
+	DstIp                uint32   `protobuf:"fixed32,11,opt,name=dst_ip,json=dstIp,proto3" json:"dst_ip,omitempty"`
+	SrcIp                uint32   `protobuf:"fixed32,12,opt,name=src_ip,json=srcIp,proto3" json:"src_ip,omitempty"`
+	SrcPort              uint32   `protobuf:"fixed32,13,opt,name=src_port,json=srcPort,proto3" json:"src_port,omitempty"`
+	DstPort              uint32   `protobuf:"fixed32,14,opt,name=dst_port,json=dstPort,proto3" json:"dst_port,omitempty"`
+	PktTagType           string   `protobuf:"bytes,15,opt,name=pkt_tag_type,json=pktTagType,proto3" json:"pkt_tag_type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Classifier) Reset()         { *m = Classifier{} }
+func (m *Classifier) String() string { return proto.CompactTextString(m) }
+func (*Classifier) ProtoMessage()    {}
+func (*Classifier) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{16}
+}
+
+func (m *Classifier) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Classifier.Unmarshal(m, b)
+}
+func (m *Classifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Classifier.Marshal(b, m, deterministic)
+}
+func (m *Classifier) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Classifier.Merge(m, src)
+}
+func (m *Classifier) XXX_Size() int {
+	return xxx_messageInfo_Classifier.Size(m)
+}
+func (m *Classifier) XXX_DiscardUnknown() {
+	xxx_messageInfo_Classifier.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Classifier proto.InternalMessageInfo
+
+func (m *Classifier) GetOTpid() uint32 {
+	if m != nil {
+		return m.OTpid
+	}
+	return 0
+}
+
+func (m *Classifier) GetOVid() uint32 {
+	if m != nil {
+		return m.OVid
+	}
+	return 0
+}
+
+func (m *Classifier) GetITpid() uint32 {
+	if m != nil {
+		return m.ITpid
+	}
+	return 0
+}
+
+func (m *Classifier) GetIVid() uint32 {
+	if m != nil {
+		return m.IVid
+	}
+	return 0
+}
+
+func (m *Classifier) GetOPbits() uint32 {
+	if m != nil {
+		return m.OPbits
+	}
+	return 0
+}
+
+func (m *Classifier) GetIPbits() uint32 {
+	if m != nil {
+		return m.IPbits
+	}
+	return 0
+}
+
+func (m *Classifier) GetEthType() uint32 {
+	if m != nil {
+		return m.EthType
+	}
+	return 0
+}
+
+func (m *Classifier) GetDstMac() []byte {
+	if m != nil {
+		return m.DstMac
+	}
+	return nil
+}
+
+func (m *Classifier) GetSrcMac() []byte {
+	if m != nil {
+		return m.SrcMac
+	}
+	return nil
+}
+
+func (m *Classifier) GetIpProto() uint32 {
+	if m != nil {
+		return m.IpProto
+	}
+	return 0
+}
+
+func (m *Classifier) GetDstIp() uint32 {
+	if m != nil {
+		return m.DstIp
+	}
+	return 0
+}
+
+func (m *Classifier) GetSrcIp() uint32 {
+	if m != nil {
+		return m.SrcIp
+	}
+	return 0
+}
+
+func (m *Classifier) GetSrcPort() uint32 {
+	if m != nil {
+		return m.SrcPort
+	}
+	return 0
+}
+
+func (m *Classifier) GetDstPort() uint32 {
+	if m != nil {
+		return m.DstPort
+	}
+	return 0
+}
+
+func (m *Classifier) GetPktTagType() string {
+	if m != nil {
+		return m.PktTagType
+	}
+	return ""
+}
+
+type ActionCmd struct {
+	AddOuterTag          bool     `protobuf:"varint,1,opt,name=add_outer_tag,json=addOuterTag,proto3" json:"add_outer_tag,omitempty"`
+	RemoveOuterTag       bool     `protobuf:"varint,2,opt,name=remove_outer_tag,json=removeOuterTag,proto3" json:"remove_outer_tag,omitempty"`
+	TrapToHost           bool     `protobuf:"varint,3,opt,name=trap_to_host,json=trapToHost,proto3" json:"trap_to_host,omitempty"`
+	RemarkOuterPbits     bool     `protobuf:"varint,4,opt,name=remark_outer_pbits,json=remarkOuterPbits,proto3" json:"remark_outer_pbits,omitempty"`
+	RemarkInnerPbits     bool     `protobuf:"varint,5,opt,name=remark_inner_pbits,json=remarkInnerPbits,proto3" json:"remark_inner_pbits,omitempty"`
+	AddInnerTag          bool     `protobuf:"varint,6,opt,name=add_inner_tag,json=addInnerTag,proto3" json:"add_inner_tag,omitempty"`
+	RemoveInnerTag       bool     `protobuf:"varint,7,opt,name=remove_inner_tag,json=removeInnerTag,proto3" json:"remove_inner_tag,omitempty"`
+	TranslateInnerTag    bool     `protobuf:"varint,8,opt,name=translate_inner_tag,json=translateInnerTag,proto3" json:"translate_inner_tag,omitempty"`
+	TranslateOuterTag    bool     `protobuf:"varint,9,opt,name=translate_outer_tag,json=translateOuterTag,proto3" json:"translate_outer_tag,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ActionCmd) Reset()         { *m = ActionCmd{} }
+func (m *ActionCmd) String() string { return proto.CompactTextString(m) }
+func (*ActionCmd) ProtoMessage()    {}
+func (*ActionCmd) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{17}
+}
+
+func (m *ActionCmd) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ActionCmd.Unmarshal(m, b)
+}
+func (m *ActionCmd) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ActionCmd.Marshal(b, m, deterministic)
+}
+func (m *ActionCmd) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ActionCmd.Merge(m, src)
+}
+func (m *ActionCmd) XXX_Size() int {
+	return xxx_messageInfo_ActionCmd.Size(m)
+}
+func (m *ActionCmd) XXX_DiscardUnknown() {
+	xxx_messageInfo_ActionCmd.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ActionCmd proto.InternalMessageInfo
+
+func (m *ActionCmd) GetAddOuterTag() bool {
+	if m != nil {
+		return m.AddOuterTag
+	}
+	return false
+}
+
+func (m *ActionCmd) GetRemoveOuterTag() bool {
+	if m != nil {
+		return m.RemoveOuterTag
+	}
+	return false
+}
+
+func (m *ActionCmd) GetTrapToHost() bool {
+	if m != nil {
+		return m.TrapToHost
+	}
+	return false
+}
+
+func (m *ActionCmd) GetRemarkOuterPbits() bool {
+	if m != nil {
+		return m.RemarkOuterPbits
+	}
+	return false
+}
+
+func (m *ActionCmd) GetRemarkInnerPbits() bool {
+	if m != nil {
+		return m.RemarkInnerPbits
+	}
+	return false
+}
+
+func (m *ActionCmd) GetAddInnerTag() bool {
+	if m != nil {
+		return m.AddInnerTag
+	}
+	return false
+}
+
+func (m *ActionCmd) GetRemoveInnerTag() bool {
+	if m != nil {
+		return m.RemoveInnerTag
+	}
+	return false
+}
+
+func (m *ActionCmd) GetTranslateInnerTag() bool {
+	if m != nil {
+		return m.TranslateInnerTag
+	}
+	return false
+}
+
+func (m *ActionCmd) GetTranslateOuterTag() bool {
+	if m != nil {
+		return m.TranslateOuterTag
+	}
+	return false
+}
+
+type Action struct {
+	Cmd                  *ActionCmd `protobuf:"bytes,1,opt,name=cmd,proto3" json:"cmd,omitempty"`
+	OVid                 uint32     `protobuf:"fixed32,2,opt,name=o_vid,json=oVid,proto3" json:"o_vid,omitempty"`
+	OPbits               uint32     `protobuf:"fixed32,3,opt,name=o_pbits,json=oPbits,proto3" json:"o_pbits,omitempty"`
+	OTpid                uint32     `protobuf:"fixed32,4,opt,name=o_tpid,json=oTpid,proto3" json:"o_tpid,omitempty"`
+	IVid                 uint32     `protobuf:"fixed32,5,opt,name=i_vid,json=iVid,proto3" json:"i_vid,omitempty"`
+	IPbits               uint32     `protobuf:"fixed32,6,opt,name=i_pbits,json=iPbits,proto3" json:"i_pbits,omitempty"`
+	ITpid                uint32     `protobuf:"fixed32,7,opt,name=i_tpid,json=iTpid,proto3" json:"i_tpid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
+}
+
+func (m *Action) Reset()         { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage()    {}
+func (*Action) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{18}
+}
+
+func (m *Action) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Action.Unmarshal(m, b)
+}
+func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Action.Marshal(b, m, deterministic)
+}
+func (m *Action) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Action.Merge(m, src)
+}
+func (m *Action) XXX_Size() int {
+	return xxx_messageInfo_Action.Size(m)
+}
+func (m *Action) XXX_DiscardUnknown() {
+	xxx_messageInfo_Action.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Action proto.InternalMessageInfo
+
+func (m *Action) GetCmd() *ActionCmd {
+	if m != nil {
+		return m.Cmd
+	}
+	return nil
+}
+
+func (m *Action) GetOVid() uint32 {
+	if m != nil {
+		return m.OVid
+	}
+	return 0
+}
+
+func (m *Action) GetOPbits() uint32 {
+	if m != nil {
+		return m.OPbits
+	}
+	return 0
+}
+
+func (m *Action) GetOTpid() uint32 {
+	if m != nil {
+		return m.OTpid
+	}
+	return 0
+}
+
+func (m *Action) GetIVid() uint32 {
+	if m != nil {
+		return m.IVid
+	}
+	return 0
+}
+
+func (m *Action) GetIPbits() uint32 {
+	if m != nil {
+		return m.IPbits
+	}
+	return 0
+}
+
+func (m *Action) GetITpid() uint32 {
+	if m != nil {
+		return m.ITpid
+	}
+	return 0
+}
+
+type Flow struct {
+	AccessIntfId         int32       `protobuf:"fixed32,1,opt,name=access_intf_id,json=accessIntfId,proto3" json:"access_intf_id,omitempty"`
+	OnuId                int32       `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	UniId                int32       `protobuf:"fixed32,11,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
+	FlowId               uint32      `protobuf:"fixed32,3,opt,name=flow_id,json=flowId,proto3" json:"flow_id,omitempty"`
+	FlowType             string      `protobuf:"bytes,4,opt,name=flow_type,json=flowType,proto3" json:"flow_type,omitempty"`
+	AllocId              int32       `protobuf:"fixed32,10,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
+	NetworkIntfId        int32       `protobuf:"fixed32,5,opt,name=network_intf_id,json=networkIntfId,proto3" json:"network_intf_id,omitempty"`
+	GemportId            int32       `protobuf:"fixed32,6,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
+	Classifier           *Classifier `protobuf:"bytes,7,opt,name=classifier,proto3" json:"classifier,omitempty"`
+	Action               *Action     `protobuf:"bytes,8,opt,name=action,proto3" json:"action,omitempty"`
+	Priority             int32       `protobuf:"fixed32,9,opt,name=priority,proto3" json:"priority,omitempty"`
+	Cookie               uint64      `protobuf:"fixed64,12,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	PortNo               uint32      `protobuf:"fixed32,13,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	GroupId              uint32      `protobuf:"fixed32,14,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *Flow) Reset()         { *m = Flow{} }
+func (m *Flow) String() string { return proto.CompactTextString(m) }
+func (*Flow) ProtoMessage()    {}
+func (*Flow) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{19}
+}
+
+func (m *Flow) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Flow.Unmarshal(m, b)
+}
+func (m *Flow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Flow.Marshal(b, m, deterministic)
+}
+func (m *Flow) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Flow.Merge(m, src)
+}
+func (m *Flow) XXX_Size() int {
+	return xxx_messageInfo_Flow.Size(m)
+}
+func (m *Flow) XXX_DiscardUnknown() {
+	xxx_messageInfo_Flow.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Flow proto.InternalMessageInfo
+
+func (m *Flow) GetAccessIntfId() int32 {
+	if m != nil {
+		return m.AccessIntfId
+	}
+	return 0
+}
+
+func (m *Flow) GetOnuId() int32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *Flow) GetUniId() int32 {
+	if m != nil {
+		return m.UniId
+	}
+	return 0
+}
+
+func (m *Flow) GetFlowId() uint32 {
+	if m != nil {
+		return m.FlowId
+	}
+	return 0
+}
+
+func (m *Flow) GetFlowType() string {
+	if m != nil {
+		return m.FlowType
+	}
+	return ""
+}
+
+func (m *Flow) GetAllocId() int32 {
+	if m != nil {
+		return m.AllocId
+	}
+	return 0
+}
+
+func (m *Flow) GetNetworkIntfId() int32 {
+	if m != nil {
+		return m.NetworkIntfId
+	}
+	return 0
+}
+
+func (m *Flow) GetGemportId() int32 {
+	if m != nil {
+		return m.GemportId
+	}
+	return 0
+}
+
+func (m *Flow) GetClassifier() *Classifier {
+	if m != nil {
+		return m.Classifier
+	}
+	return nil
+}
+
+func (m *Flow) GetAction() *Action {
+	if m != nil {
+		return m.Action
+	}
+	return nil
+}
+
+func (m *Flow) GetPriority() int32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *Flow) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *Flow) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *Flow) GetGroupId() uint32 {
+	if m != nil {
+		return m.GroupId
+	}
+	return 0
+}
+
+type SerialNumber struct {
+	VendorId             []byte   `protobuf:"bytes,1,opt,name=vendor_id,json=vendorId,proto3" json:"vendor_id,omitempty"`
+	VendorSpecific       []byte   `protobuf:"bytes,2,opt,name=vendor_specific,json=vendorSpecific,proto3" json:"vendor_specific,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SerialNumber) Reset()         { *m = SerialNumber{} }
+func (m *SerialNumber) String() string { return proto.CompactTextString(m) }
+func (*SerialNumber) ProtoMessage()    {}
+func (*SerialNumber) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{20}
+}
+
+func (m *SerialNumber) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SerialNumber.Unmarshal(m, b)
+}
+func (m *SerialNumber) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SerialNumber.Marshal(b, m, deterministic)
+}
+func (m *SerialNumber) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SerialNumber.Merge(m, src)
+}
+func (m *SerialNumber) XXX_Size() int {
+	return xxx_messageInfo_SerialNumber.Size(m)
+}
+func (m *SerialNumber) XXX_DiscardUnknown() {
+	xxx_messageInfo_SerialNumber.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SerialNumber proto.InternalMessageInfo
+
+func (m *SerialNumber) GetVendorId() []byte {
+	if m != nil {
+		return m.VendorId
+	}
+	return nil
+}
+
+func (m *SerialNumber) GetVendorSpecific() []byte {
+	if m != nil {
+		return m.VendorSpecific
+	}
+	return nil
+}
+
+type PortStatistics struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	RxBytes              uint64   `protobuf:"fixed64,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
+	RxPackets            uint64   `protobuf:"fixed64,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"`
+	RxUcastPackets       uint64   `protobuf:"fixed64,4,opt,name=rx_ucast_packets,json=rxUcastPackets,proto3" json:"rx_ucast_packets,omitempty"`
+	RxMcastPackets       uint64   `protobuf:"fixed64,5,opt,name=rx_mcast_packets,json=rxMcastPackets,proto3" json:"rx_mcast_packets,omitempty"`
+	RxBcastPackets       uint64   `protobuf:"fixed64,6,opt,name=rx_bcast_packets,json=rxBcastPackets,proto3" json:"rx_bcast_packets,omitempty"`
+	RxErrorPackets       uint64   `protobuf:"fixed64,7,opt,name=rx_error_packets,json=rxErrorPackets,proto3" json:"rx_error_packets,omitempty"`
+	TxBytes              uint64   `protobuf:"fixed64,8,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+	TxPackets            uint64   `protobuf:"fixed64,9,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
+	TxUcastPackets       uint64   `protobuf:"fixed64,10,opt,name=tx_ucast_packets,json=txUcastPackets,proto3" json:"tx_ucast_packets,omitempty"`
+	TxMcastPackets       uint64   `protobuf:"fixed64,11,opt,name=tx_mcast_packets,json=txMcastPackets,proto3" json:"tx_mcast_packets,omitempty"`
+	TxBcastPackets       uint64   `protobuf:"fixed64,12,opt,name=tx_bcast_packets,json=txBcastPackets,proto3" json:"tx_bcast_packets,omitempty"`
+	TxErrorPackets       uint64   `protobuf:"fixed64,13,opt,name=tx_error_packets,json=txErrorPackets,proto3" json:"tx_error_packets,omitempty"`
+	RxCrcErrors          uint64   `protobuf:"fixed64,14,opt,name=rx_crc_errors,json=rxCrcErrors,proto3" json:"rx_crc_errors,omitempty"`
+	BipErrors            uint64   `protobuf:"fixed64,15,opt,name=bip_errors,json=bipErrors,proto3" json:"bip_errors,omitempty"`
+	Timestamp            uint32   `protobuf:"fixed32,16,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PortStatistics) Reset()         { *m = PortStatistics{} }
+func (m *PortStatistics) String() string { return proto.CompactTextString(m) }
+func (*PortStatistics) ProtoMessage()    {}
+func (*PortStatistics) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{21}
+}
+
+func (m *PortStatistics) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PortStatistics.Unmarshal(m, b)
+}
+func (m *PortStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PortStatistics.Marshal(b, m, deterministic)
+}
+func (m *PortStatistics) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PortStatistics.Merge(m, src)
+}
+func (m *PortStatistics) XXX_Size() int {
+	return xxx_messageInfo_PortStatistics.Size(m)
+}
+func (m *PortStatistics) XXX_DiscardUnknown() {
+	xxx_messageInfo_PortStatistics.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PortStatistics proto.InternalMessageInfo
+
+func (m *PortStatistics) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxBytes() uint64 {
+	if m != nil {
+		return m.RxBytes
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxPackets() uint64 {
+	if m != nil {
+		return m.RxPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxUcastPackets() uint64 {
+	if m != nil {
+		return m.RxUcastPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxMcastPackets() uint64 {
+	if m != nil {
+		return m.RxMcastPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxBcastPackets() uint64 {
+	if m != nil {
+		return m.RxBcastPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxErrorPackets() uint64 {
+	if m != nil {
+		return m.RxErrorPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTxBytes() uint64 {
+	if m != nil {
+		return m.TxBytes
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTxPackets() uint64 {
+	if m != nil {
+		return m.TxPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTxUcastPackets() uint64 {
+	if m != nil {
+		return m.TxUcastPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTxMcastPackets() uint64 {
+	if m != nil {
+		return m.TxMcastPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTxBcastPackets() uint64 {
+	if m != nil {
+		return m.TxBcastPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTxErrorPackets() uint64 {
+	if m != nil {
+		return m.TxErrorPackets
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetRxCrcErrors() uint64 {
+	if m != nil {
+		return m.RxCrcErrors
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetBipErrors() uint64 {
+	if m != nil {
+		return m.BipErrors
+	}
+	return 0
+}
+
+func (m *PortStatistics) GetTimestamp() uint32 {
+	if m != nil {
+		return m.Timestamp
+	}
+	return 0
+}
+
+type FlowStatistics struct {
+	FlowId               uint32   `protobuf:"fixed32,1,opt,name=flow_id,json=flowId,proto3" json:"flow_id,omitempty"`
+	RxBytes              uint64   `protobuf:"fixed64,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
+	RxPackets            uint64   `protobuf:"fixed64,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"`
+	TxBytes              uint64   `protobuf:"fixed64,8,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+	TxPackets            uint64   `protobuf:"fixed64,9,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
+	Timestamp            uint32   `protobuf:"fixed32,16,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *FlowStatistics) Reset()         { *m = FlowStatistics{} }
+func (m *FlowStatistics) String() string { return proto.CompactTextString(m) }
+func (*FlowStatistics) ProtoMessage()    {}
+func (*FlowStatistics) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{22}
+}
+
+func (m *FlowStatistics) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowStatistics.Unmarshal(m, b)
+}
+func (m *FlowStatistics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowStatistics.Marshal(b, m, deterministic)
+}
+func (m *FlowStatistics) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowStatistics.Merge(m, src)
+}
+func (m *FlowStatistics) XXX_Size() int {
+	return xxx_messageInfo_FlowStatistics.Size(m)
+}
+func (m *FlowStatistics) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowStatistics.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowStatistics proto.InternalMessageInfo
+
+func (m *FlowStatistics) GetFlowId() uint32 {
+	if m != nil {
+		return m.FlowId
+	}
+	return 0
+}
+
+func (m *FlowStatistics) GetRxBytes() uint64 {
+	if m != nil {
+		return m.RxBytes
+	}
+	return 0
+}
+
+func (m *FlowStatistics) GetRxPackets() uint64 {
+	if m != nil {
+		return m.RxPackets
+	}
+	return 0
+}
+
+func (m *FlowStatistics) GetTxBytes() uint64 {
+	if m != nil {
+		return m.TxBytes
+	}
+	return 0
+}
+
+func (m *FlowStatistics) GetTxPackets() uint64 {
+	if m != nil {
+		return m.TxPackets
+	}
+	return 0
+}
+
+func (m *FlowStatistics) GetTimestamp() uint32 {
+	if m != nil {
+		return m.Timestamp
+	}
+	return 0
+}
+
+type LosIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	Status               string   `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LosIndication) Reset()         { *m = LosIndication{} }
+func (m *LosIndication) String() string { return proto.CompactTextString(m) }
+func (*LosIndication) ProtoMessage()    {}
+func (*LosIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{23}
+}
+
+func (m *LosIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LosIndication.Unmarshal(m, b)
+}
+func (m *LosIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LosIndication.Marshal(b, m, deterministic)
+}
+func (m *LosIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LosIndication.Merge(m, src)
+}
+func (m *LosIndication) XXX_Size() int {
+	return xxx_messageInfo_LosIndication.Size(m)
+}
+func (m *LosIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_LosIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LosIndication proto.InternalMessageInfo
+
+func (m *LosIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *LosIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+type DyingGaspIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DyingGaspIndication) Reset()         { *m = DyingGaspIndication{} }
+func (m *DyingGaspIndication) String() string { return proto.CompactTextString(m) }
+func (*DyingGaspIndication) ProtoMessage()    {}
+func (*DyingGaspIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{24}
+}
+
+func (m *DyingGaspIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DyingGaspIndication.Unmarshal(m, b)
+}
+func (m *DyingGaspIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DyingGaspIndication.Marshal(b, m, deterministic)
+}
+func (m *DyingGaspIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DyingGaspIndication.Merge(m, src)
+}
+func (m *DyingGaspIndication) XXX_Size() int {
+	return xxx_messageInfo_DyingGaspIndication.Size(m)
+}
+func (m *DyingGaspIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_DyingGaspIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DyingGaspIndication proto.InternalMessageInfo
+
+func (m *DyingGaspIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *DyingGaspIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *DyingGaspIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+type OnuAlarmIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	LosStatus            string   `protobuf:"bytes,3,opt,name=los_status,json=losStatus,proto3" json:"los_status,omitempty"`
+	LobStatus            string   `protobuf:"bytes,4,opt,name=lob_status,json=lobStatus,proto3" json:"lob_status,omitempty"`
+	LopcMissStatus       string   `protobuf:"bytes,5,opt,name=lopc_miss_status,json=lopcMissStatus,proto3" json:"lopc_miss_status,omitempty"`
+	LopcMicErrorStatus   string   `protobuf:"bytes,6,opt,name=lopc_mic_error_status,json=lopcMicErrorStatus,proto3" json:"lopc_mic_error_status,omitempty"`
+	LofiStatus           string   `protobuf:"bytes,7,opt,name=lofi_status,json=lofiStatus,proto3" json:"lofi_status,omitempty"`
+	LoamiStatus          string   `protobuf:"bytes,8,opt,name=loami_status,json=loamiStatus,proto3" json:"loami_status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuAlarmIndication) Reset()         { *m = OnuAlarmIndication{} }
+func (m *OnuAlarmIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuAlarmIndication) ProtoMessage()    {}
+func (*OnuAlarmIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{25}
+}
+
+func (m *OnuAlarmIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuAlarmIndication.Unmarshal(m, b)
+}
+func (m *OnuAlarmIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuAlarmIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuAlarmIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuAlarmIndication.Merge(m, src)
+}
+func (m *OnuAlarmIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuAlarmIndication.Size(m)
+}
+func (m *OnuAlarmIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuAlarmIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuAlarmIndication proto.InternalMessageInfo
+
+func (m *OnuAlarmIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuAlarmIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuAlarmIndication) GetLosStatus() string {
+	if m != nil {
+		return m.LosStatus
+	}
+	return ""
+}
+
+func (m *OnuAlarmIndication) GetLobStatus() string {
+	if m != nil {
+		return m.LobStatus
+	}
+	return ""
+}
+
+func (m *OnuAlarmIndication) GetLopcMissStatus() string {
+	if m != nil {
+		return m.LopcMissStatus
+	}
+	return ""
+}
+
+func (m *OnuAlarmIndication) GetLopcMicErrorStatus() string {
+	if m != nil {
+		return m.LopcMicErrorStatus
+	}
+	return ""
+}
+
+func (m *OnuAlarmIndication) GetLofiStatus() string {
+	if m != nil {
+		return m.LofiStatus
+	}
+	return ""
+}
+
+func (m *OnuAlarmIndication) GetLoamiStatus() string {
+	if m != nil {
+		return m.LoamiStatus
+	}
+	return ""
+}
+
+type OnuStartupFailureIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuStartupFailureIndication) Reset()         { *m = OnuStartupFailureIndication{} }
+func (m *OnuStartupFailureIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuStartupFailureIndication) ProtoMessage()    {}
+func (*OnuStartupFailureIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{26}
+}
+
+func (m *OnuStartupFailureIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuStartupFailureIndication.Unmarshal(m, b)
+}
+func (m *OnuStartupFailureIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuStartupFailureIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuStartupFailureIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuStartupFailureIndication.Merge(m, src)
+}
+func (m *OnuStartupFailureIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuStartupFailureIndication.Size(m)
+}
+func (m *OnuStartupFailureIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuStartupFailureIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuStartupFailureIndication proto.InternalMessageInfo
+
+func (m *OnuStartupFailureIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuStartupFailureIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuStartupFailureIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+type OnuSignalDegradeIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	InverseBitErrorRate  uint32   `protobuf:"fixed32,4,opt,name=inverse_bit_error_rate,json=inverseBitErrorRate,proto3" json:"inverse_bit_error_rate,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuSignalDegradeIndication) Reset()         { *m = OnuSignalDegradeIndication{} }
+func (m *OnuSignalDegradeIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuSignalDegradeIndication) ProtoMessage()    {}
+func (*OnuSignalDegradeIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{27}
+}
+
+func (m *OnuSignalDegradeIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuSignalDegradeIndication.Unmarshal(m, b)
+}
+func (m *OnuSignalDegradeIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuSignalDegradeIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuSignalDegradeIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuSignalDegradeIndication.Merge(m, src)
+}
+func (m *OnuSignalDegradeIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuSignalDegradeIndication.Size(m)
+}
+func (m *OnuSignalDegradeIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuSignalDegradeIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuSignalDegradeIndication proto.InternalMessageInfo
+
+func (m *OnuSignalDegradeIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuSignalDegradeIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuSignalDegradeIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+func (m *OnuSignalDegradeIndication) GetInverseBitErrorRate() uint32 {
+	if m != nil {
+		return m.InverseBitErrorRate
+	}
+	return 0
+}
+
+type OnuDriftOfWindowIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	Drift                uint32   `protobuf:"fixed32,4,opt,name=drift,proto3" json:"drift,omitempty"`
+	NewEqd               uint32   `protobuf:"fixed32,5,opt,name=new_eqd,json=newEqd,proto3" json:"new_eqd,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuDriftOfWindowIndication) Reset()         { *m = OnuDriftOfWindowIndication{} }
+func (m *OnuDriftOfWindowIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuDriftOfWindowIndication) ProtoMessage()    {}
+func (*OnuDriftOfWindowIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{28}
+}
+
+func (m *OnuDriftOfWindowIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuDriftOfWindowIndication.Unmarshal(m, b)
+}
+func (m *OnuDriftOfWindowIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuDriftOfWindowIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuDriftOfWindowIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuDriftOfWindowIndication.Merge(m, src)
+}
+func (m *OnuDriftOfWindowIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuDriftOfWindowIndication.Size(m)
+}
+func (m *OnuDriftOfWindowIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuDriftOfWindowIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuDriftOfWindowIndication proto.InternalMessageInfo
+
+func (m *OnuDriftOfWindowIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuDriftOfWindowIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuDriftOfWindowIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+func (m *OnuDriftOfWindowIndication) GetDrift() uint32 {
+	if m != nil {
+		return m.Drift
+	}
+	return 0
+}
+
+func (m *OnuDriftOfWindowIndication) GetNewEqd() uint32 {
+	if m != nil {
+		return m.NewEqd
+	}
+	return 0
+}
+
+type OnuLossOfOmciChannelIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuLossOfOmciChannelIndication) Reset()         { *m = OnuLossOfOmciChannelIndication{} }
+func (m *OnuLossOfOmciChannelIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuLossOfOmciChannelIndication) ProtoMessage()    {}
+func (*OnuLossOfOmciChannelIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{29}
+}
+
+func (m *OnuLossOfOmciChannelIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuLossOfOmciChannelIndication.Unmarshal(m, b)
+}
+func (m *OnuLossOfOmciChannelIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuLossOfOmciChannelIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuLossOfOmciChannelIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuLossOfOmciChannelIndication.Merge(m, src)
+}
+func (m *OnuLossOfOmciChannelIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuLossOfOmciChannelIndication.Size(m)
+}
+func (m *OnuLossOfOmciChannelIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuLossOfOmciChannelIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuLossOfOmciChannelIndication proto.InternalMessageInfo
+
+func (m *OnuLossOfOmciChannelIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuLossOfOmciChannelIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuLossOfOmciChannelIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+type OnuSignalsFailureIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	InverseBitErrorRate  uint32   `protobuf:"fixed32,4,opt,name=inverse_bit_error_rate,json=inverseBitErrorRate,proto3" json:"inverse_bit_error_rate,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuSignalsFailureIndication) Reset()         { *m = OnuSignalsFailureIndication{} }
+func (m *OnuSignalsFailureIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuSignalsFailureIndication) ProtoMessage()    {}
+func (*OnuSignalsFailureIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{30}
+}
+
+func (m *OnuSignalsFailureIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuSignalsFailureIndication.Unmarshal(m, b)
+}
+func (m *OnuSignalsFailureIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuSignalsFailureIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuSignalsFailureIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuSignalsFailureIndication.Merge(m, src)
+}
+func (m *OnuSignalsFailureIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuSignalsFailureIndication.Size(m)
+}
+func (m *OnuSignalsFailureIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuSignalsFailureIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuSignalsFailureIndication proto.InternalMessageInfo
+
+func (m *OnuSignalsFailureIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuSignalsFailureIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuSignalsFailureIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+func (m *OnuSignalsFailureIndication) GetInverseBitErrorRate() uint32 {
+	if m != nil {
+		return m.InverseBitErrorRate
+	}
+	return 0
+}
+
+type OnuTransmissionInterferenceWarning struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	Drift                uint32   `protobuf:"fixed32,4,opt,name=drift,proto3" json:"drift,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuTransmissionInterferenceWarning) Reset()         { *m = OnuTransmissionInterferenceWarning{} }
+func (m *OnuTransmissionInterferenceWarning) String() string { return proto.CompactTextString(m) }
+func (*OnuTransmissionInterferenceWarning) ProtoMessage()    {}
+func (*OnuTransmissionInterferenceWarning) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{31}
+}
+
+func (m *OnuTransmissionInterferenceWarning) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuTransmissionInterferenceWarning.Unmarshal(m, b)
+}
+func (m *OnuTransmissionInterferenceWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuTransmissionInterferenceWarning.Marshal(b, m, deterministic)
+}
+func (m *OnuTransmissionInterferenceWarning) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuTransmissionInterferenceWarning.Merge(m, src)
+}
+func (m *OnuTransmissionInterferenceWarning) XXX_Size() int {
+	return xxx_messageInfo_OnuTransmissionInterferenceWarning.Size(m)
+}
+func (m *OnuTransmissionInterferenceWarning) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuTransmissionInterferenceWarning.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuTransmissionInterferenceWarning proto.InternalMessageInfo
+
+func (m *OnuTransmissionInterferenceWarning) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuTransmissionInterferenceWarning) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuTransmissionInterferenceWarning) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+func (m *OnuTransmissionInterferenceWarning) GetDrift() uint32 {
+	if m != nil {
+		return m.Drift
+	}
+	return 0
+}
+
+type OnuActivationFailureIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	FailReason           uint32   `protobuf:"fixed32,3,opt,name=fail_reason,json=failReason,proto3" json:"fail_reason,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuActivationFailureIndication) Reset()         { *m = OnuActivationFailureIndication{} }
+func (m *OnuActivationFailureIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuActivationFailureIndication) ProtoMessage()    {}
+func (*OnuActivationFailureIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{32}
+}
+
+func (m *OnuActivationFailureIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuActivationFailureIndication.Unmarshal(m, b)
+}
+func (m *OnuActivationFailureIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuActivationFailureIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuActivationFailureIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuActivationFailureIndication.Merge(m, src)
+}
+func (m *OnuActivationFailureIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuActivationFailureIndication.Size(m)
+}
+func (m *OnuActivationFailureIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuActivationFailureIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuActivationFailureIndication proto.InternalMessageInfo
+
+func (m *OnuActivationFailureIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuActivationFailureIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuActivationFailureIndication) GetFailReason() uint32 {
+	if m != nil {
+		return m.FailReason
+	}
+	return 0
+}
+
+type OnuLossOfKeySyncFailureIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuLossOfKeySyncFailureIndication) Reset()         { *m = OnuLossOfKeySyncFailureIndication{} }
+func (m *OnuLossOfKeySyncFailureIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuLossOfKeySyncFailureIndication) ProtoMessage()    {}
+func (*OnuLossOfKeySyncFailureIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{33}
+}
+
+func (m *OnuLossOfKeySyncFailureIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuLossOfKeySyncFailureIndication.Unmarshal(m, b)
+}
+func (m *OnuLossOfKeySyncFailureIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuLossOfKeySyncFailureIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuLossOfKeySyncFailureIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuLossOfKeySyncFailureIndication.Merge(m, src)
+}
+func (m *OnuLossOfKeySyncFailureIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuLossOfKeySyncFailureIndication.Size(m)
+}
+func (m *OnuLossOfKeySyncFailureIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuLossOfKeySyncFailureIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuLossOfKeySyncFailureIndication proto.InternalMessageInfo
+
+func (m *OnuLossOfKeySyncFailureIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuLossOfKeySyncFailureIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuLossOfKeySyncFailureIndication) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+type OnuItuPonStatsIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	RdiErrors            uint32   `protobuf:"fixed32,3,opt,name=rdi_errors,json=rdiErrors,proto3" json:"rdi_errors,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuItuPonStatsIndication) Reset()         { *m = OnuItuPonStatsIndication{} }
+func (m *OnuItuPonStatsIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuItuPonStatsIndication) ProtoMessage()    {}
+func (*OnuItuPonStatsIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{34}
+}
+
+func (m *OnuItuPonStatsIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuItuPonStatsIndication.Unmarshal(m, b)
+}
+func (m *OnuItuPonStatsIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuItuPonStatsIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuItuPonStatsIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuItuPonStatsIndication.Merge(m, src)
+}
+func (m *OnuItuPonStatsIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuItuPonStatsIndication.Size(m)
+}
+func (m *OnuItuPonStatsIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuItuPonStatsIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuItuPonStatsIndication proto.InternalMessageInfo
+
+func (m *OnuItuPonStatsIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuItuPonStatsIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuItuPonStatsIndication) GetRdiErrors() uint32 {
+	if m != nil {
+		return m.RdiErrors
+	}
+	return 0
+}
+
+type OnuProcessingErrorIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuProcessingErrorIndication) Reset()         { *m = OnuProcessingErrorIndication{} }
+func (m *OnuProcessingErrorIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuProcessingErrorIndication) ProtoMessage()    {}
+func (*OnuProcessingErrorIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{35}
+}
+
+func (m *OnuProcessingErrorIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuProcessingErrorIndication.Unmarshal(m, b)
+}
+func (m *OnuProcessingErrorIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuProcessingErrorIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuProcessingErrorIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuProcessingErrorIndication.Merge(m, src)
+}
+func (m *OnuProcessingErrorIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuProcessingErrorIndication.Size(m)
+}
+func (m *OnuProcessingErrorIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuProcessingErrorIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuProcessingErrorIndication proto.InternalMessageInfo
+
+func (m *OnuProcessingErrorIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuProcessingErrorIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+type OnuDeactivationFailureIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	FailReason           uint32   `protobuf:"fixed32,3,opt,name=fail_reason,json=failReason,proto3" json:"fail_reason,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuDeactivationFailureIndication) Reset()         { *m = OnuDeactivationFailureIndication{} }
+func (m *OnuDeactivationFailureIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuDeactivationFailureIndication) ProtoMessage()    {}
+func (*OnuDeactivationFailureIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{36}
+}
+
+func (m *OnuDeactivationFailureIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuDeactivationFailureIndication.Unmarshal(m, b)
+}
+func (m *OnuDeactivationFailureIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuDeactivationFailureIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuDeactivationFailureIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuDeactivationFailureIndication.Merge(m, src)
+}
+func (m *OnuDeactivationFailureIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuDeactivationFailureIndication.Size(m)
+}
+func (m *OnuDeactivationFailureIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuDeactivationFailureIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuDeactivationFailureIndication proto.InternalMessageInfo
+
+func (m *OnuDeactivationFailureIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuDeactivationFailureIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuDeactivationFailureIndication) GetFailReason() uint32 {
+	if m != nil {
+		return m.FailReason
+	}
+	return 0
+}
+
+type OnuRemoteDefectIndication struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	RdiErrors            uint32   `protobuf:"fixed32,3,opt,name=rdi_errors,json=rdiErrors,proto3" json:"rdi_errors,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuRemoteDefectIndication) Reset()         { *m = OnuRemoteDefectIndication{} }
+func (m *OnuRemoteDefectIndication) String() string { return proto.CompactTextString(m) }
+func (*OnuRemoteDefectIndication) ProtoMessage()    {}
+func (*OnuRemoteDefectIndication) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{37}
+}
+
+func (m *OnuRemoteDefectIndication) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuRemoteDefectIndication.Unmarshal(m, b)
+}
+func (m *OnuRemoteDefectIndication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuRemoteDefectIndication.Marshal(b, m, deterministic)
+}
+func (m *OnuRemoteDefectIndication) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuRemoteDefectIndication.Merge(m, src)
+}
+func (m *OnuRemoteDefectIndication) XXX_Size() int {
+	return xxx_messageInfo_OnuRemoteDefectIndication.Size(m)
+}
+func (m *OnuRemoteDefectIndication) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuRemoteDefectIndication.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuRemoteDefectIndication proto.InternalMessageInfo
+
+func (m *OnuRemoteDefectIndication) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *OnuRemoteDefectIndication) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuRemoteDefectIndication) GetRdiErrors() uint32 {
+	if m != nil {
+		return m.RdiErrors
+	}
+	return 0
+}
+
+type GroupMember struct {
+	InterfaceId          uint32                    `protobuf:"varint,1,opt,name=interface_id,json=interfaceId,proto3" json:"interface_id,omitempty"`
+	InterfaceType        GroupMember_InterfaceType `protobuf:"varint,2,opt,name=interface_type,json=interfaceType,proto3,enum=openolt.GroupMember_InterfaceType" json:"interface_type,omitempty"`
+	GemPortId            uint32                    `protobuf:"varint,3,opt,name=gem_port_id,json=gemPortId,proto3" json:"gem_port_id,omitempty"`
+	Priority             uint32                    `protobuf:"varint,4,opt,name=priority,proto3" json:"priority,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *GroupMember) Reset()         { *m = GroupMember{} }
+func (m *GroupMember) String() string { return proto.CompactTextString(m) }
+func (*GroupMember) ProtoMessage()    {}
+func (*GroupMember) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{38}
+}
+
+func (m *GroupMember) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GroupMember.Unmarshal(m, b)
+}
+func (m *GroupMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GroupMember.Marshal(b, m, deterministic)
+}
+func (m *GroupMember) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GroupMember.Merge(m, src)
+}
+func (m *GroupMember) XXX_Size() int {
+	return xxx_messageInfo_GroupMember.Size(m)
+}
+func (m *GroupMember) XXX_DiscardUnknown() {
+	xxx_messageInfo_GroupMember.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GroupMember proto.InternalMessageInfo
+
+func (m *GroupMember) GetInterfaceId() uint32 {
+	if m != nil {
+		return m.InterfaceId
+	}
+	return 0
+}
+
+func (m *GroupMember) GetInterfaceType() GroupMember_InterfaceType {
+	if m != nil {
+		return m.InterfaceType
+	}
+	return GroupMember_PON
+}
+
+func (m *GroupMember) GetGemPortId() uint32 {
+	if m != nil {
+		return m.GemPortId
+	}
+	return 0
+}
+
+func (m *GroupMember) GetPriority() uint32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+type Group struct {
+	GroupId              uint32                    `protobuf:"varint,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	Command              Group_GroupMembersCommand `protobuf:"varint,2,opt,name=command,proto3,enum=openolt.Group_GroupMembersCommand" json:"command,omitempty"`
+	Members              []*GroupMember            `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"`
+	Action               *Action                   `protobuf:"bytes,4,opt,name=action,proto3" json:"action,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *Group) Reset()         { *m = Group{} }
+func (m *Group) String() string { return proto.CompactTextString(m) }
+func (*Group) ProtoMessage()    {}
+func (*Group) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{39}
+}
+
+func (m *Group) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Group.Unmarshal(m, b)
+}
+func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Group.Marshal(b, m, deterministic)
+}
+func (m *Group) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Group.Merge(m, src)
+}
+func (m *Group) XXX_Size() int {
+	return xxx_messageInfo_Group.Size(m)
+}
+func (m *Group) XXX_DiscardUnknown() {
+	xxx_messageInfo_Group.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Group proto.InternalMessageInfo
+
+func (m *Group) GetGroupId() uint32 {
+	if m != nil {
+		return m.GroupId
+	}
+	return 0
+}
+
+func (m *Group) GetCommand() Group_GroupMembersCommand {
+	if m != nil {
+		return m.Command
+	}
+	return Group_ADD_MEMBERS
+}
+
+func (m *Group) GetMembers() []*GroupMember {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+func (m *Group) GetAction() *Action {
+	if m != nil {
+		return m.Action
+	}
+	return nil
+}
+
+type Empty struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Empty) Reset()         { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage()    {}
+func (*Empty) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c072e7aa0dfd74d5, []int{40}
+}
+
+func (m *Empty) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Empty.Unmarshal(m, b)
+}
+func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
+}
+func (m *Empty) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Empty.Merge(m, src)
+}
+func (m *Empty) XXX_Size() int {
+	return xxx_messageInfo_Empty.Size(m)
+}
+func (m *Empty) XXX_DiscardUnknown() {
+	xxx_messageInfo_Empty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Empty proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterEnum("openolt.DeviceInfo_DeviceResourceRanges_Pool_PoolType", DeviceInfo_DeviceResourceRanges_Pool_PoolType_name, DeviceInfo_DeviceResourceRanges_Pool_PoolType_value)
+	proto.RegisterEnum("openolt.DeviceInfo_DeviceResourceRanges_Pool_SharingType", DeviceInfo_DeviceResourceRanges_Pool_SharingType_name, DeviceInfo_DeviceResourceRanges_Pool_SharingType_value)
+	proto.RegisterEnum("openolt.GroupMember_InterfaceType", GroupMember_InterfaceType_name, GroupMember_InterfaceType_value)
+	proto.RegisterEnum("openolt.Group_GroupMembersCommand", Group_GroupMembersCommand_name, Group_GroupMembersCommand_value)
+	proto.RegisterType((*Indication)(nil), "openolt.Indication")
+	proto.RegisterType((*AlarmIndication)(nil), "openolt.AlarmIndication")
+	proto.RegisterType((*OltIndication)(nil), "openolt.OltIndication")
+	proto.RegisterType((*IntfIndication)(nil), "openolt.IntfIndication")
+	proto.RegisterType((*OnuDiscIndication)(nil), "openolt.OnuDiscIndication")
+	proto.RegisterType((*OnuIndication)(nil), "openolt.OnuIndication")
+	proto.RegisterType((*IntfOperIndication)(nil), "openolt.IntfOperIndication")
+	proto.RegisterType((*OmciIndication)(nil), "openolt.OmciIndication")
+	proto.RegisterType((*PacketIndication)(nil), "openolt.PacketIndication")
+	proto.RegisterType((*Interface)(nil), "openolt.Interface")
+	proto.RegisterType((*Heartbeat)(nil), "openolt.Heartbeat")
+	proto.RegisterType((*Onu)(nil), "openolt.Onu")
+	proto.RegisterType((*OmciMsg)(nil), "openolt.OmciMsg")
+	proto.RegisterType((*OnuPacket)(nil), "openolt.OnuPacket")
+	proto.RegisterType((*UplinkPacket)(nil), "openolt.UplinkPacket")
+	proto.RegisterType((*DeviceInfo)(nil), "openolt.DeviceInfo")
+	proto.RegisterType((*DeviceInfo_DeviceResourceRanges)(nil), "openolt.DeviceInfo.DeviceResourceRanges")
+	proto.RegisterType((*DeviceInfo_DeviceResourceRanges_Pool)(nil), "openolt.DeviceInfo.DeviceResourceRanges.Pool")
+	proto.RegisterType((*Classifier)(nil), "openolt.Classifier")
+	proto.RegisterType((*ActionCmd)(nil), "openolt.ActionCmd")
+	proto.RegisterType((*Action)(nil), "openolt.Action")
+	proto.RegisterType((*Flow)(nil), "openolt.Flow")
+	proto.RegisterType((*SerialNumber)(nil), "openolt.SerialNumber")
+	proto.RegisterType((*PortStatistics)(nil), "openolt.PortStatistics")
+	proto.RegisterType((*FlowStatistics)(nil), "openolt.FlowStatistics")
+	proto.RegisterType((*LosIndication)(nil), "openolt.LosIndication")
+	proto.RegisterType((*DyingGaspIndication)(nil), "openolt.DyingGaspIndication")
+	proto.RegisterType((*OnuAlarmIndication)(nil), "openolt.OnuAlarmIndication")
+	proto.RegisterType((*OnuStartupFailureIndication)(nil), "openolt.OnuStartupFailureIndication")
+	proto.RegisterType((*OnuSignalDegradeIndication)(nil), "openolt.OnuSignalDegradeIndication")
+	proto.RegisterType((*OnuDriftOfWindowIndication)(nil), "openolt.OnuDriftOfWindowIndication")
+	proto.RegisterType((*OnuLossOfOmciChannelIndication)(nil), "openolt.OnuLossOfOmciChannelIndication")
+	proto.RegisterType((*OnuSignalsFailureIndication)(nil), "openolt.OnuSignalsFailureIndication")
+	proto.RegisterType((*OnuTransmissionInterferenceWarning)(nil), "openolt.OnuTransmissionInterferenceWarning")
+	proto.RegisterType((*OnuActivationFailureIndication)(nil), "openolt.OnuActivationFailureIndication")
+	proto.RegisterType((*OnuLossOfKeySyncFailureIndication)(nil), "openolt.OnuLossOfKeySyncFailureIndication")
+	proto.RegisterType((*OnuItuPonStatsIndication)(nil), "openolt.OnuItuPonStatsIndication")
+	proto.RegisterType((*OnuProcessingErrorIndication)(nil), "openolt.OnuProcessingErrorIndication")
+	proto.RegisterType((*OnuDeactivationFailureIndication)(nil), "openolt.OnuDeactivationFailureIndication")
+	proto.RegisterType((*OnuRemoteDefectIndication)(nil), "openolt.OnuRemoteDefectIndication")
+	proto.RegisterType((*GroupMember)(nil), "openolt.GroupMember")
+	proto.RegisterType((*Group)(nil), "openolt.Group")
+	proto.RegisterType((*Empty)(nil), "openolt.Empty")
+}
+
+func init() { proto.RegisterFile("voltha_protos/openolt.proto", fileDescriptor_c072e7aa0dfd74d5) }
+
+var fileDescriptor_c072e7aa0dfd74d5 = []byte{
+	// 3558 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x5a, 0x4f, 0x73, 0x23, 0x49,
+	0x56, 0x6f, 0xd9, 0xb2, 0x4a, 0x7a, 0xfa, 0x63, 0x75, 0xba, 0xdd, 0xed, 0x7f, 0xdb, 0xed, 0x29,
+	0x9a, 0x9d, 0xde, 0x81, 0xb5, 0x67, 0x7a, 0x36, 0x80, 0x1d, 0x16, 0x18, 0xdb, 0x52, 0xb7, 0xc5,
+	0xb4, 0x5b, 0xa6, 0xac, 0x9e, 0x86, 0x25, 0x26, 0x6a, 0xcb, 0x55, 0x29, 0x39, 0x71, 0xa9, 0xb2,
+	0xa6, 0x2a, 0x65, 0xb7, 0xaf, 0x0b, 0xcb, 0x07, 0x60, 0x83, 0x03, 0x1c, 0xe1, 0x0b, 0x70, 0xdc,
+	0x88, 0x39, 0x72, 0x25, 0xb8, 0xf0, 0x15, 0xf8, 0x0c, 0x9c, 0x38, 0x10, 0xf9, 0x32, 0xeb, 0x9f,
+	0x24, 0xbb, 0xdb, 0x83, 0x09, 0x2e, 0x0e, 0xe7, 0x7b, 0xbf, 0xf7, 0xcb, 0x7c, 0x99, 0x2f, 0x5f,
+	0xbe, 0xca, 0x14, 0x6c, 0x5e, 0x70, 0x5f, 0x9c, 0x39, 0x76, 0x18, 0x71, 0xc1, 0xe3, 0x5d, 0x1e,
+	0xd2, 0x80, 0xfb, 0x62, 0x07, 0x9b, 0xc4, 0xd0, 0xcd, 0x8d, 0xad, 0x11, 0xe7, 0x23, 0x9f, 0xee,
+	0x3a, 0x21, 0xdb, 0x75, 0x82, 0x80, 0x0b, 0x47, 0x30, 0x1e, 0xc4, 0x0a, 0xb6, 0xb1, 0x5d, 0xe4,
+	0x10, 0xd4, 0x3d, 0x93, 0xff, 0x0f, 0x99, 0x4f, 0x15, 0xc2, 0xfc, 0xb7, 0x32, 0x40, 0x2f, 0xf0,
+	0x98, 0x8b, 0x76, 0xe4, 0x33, 0x30, 0xb8, 0x2f, 0x6c, 0x16, 0x78, 0x6b, 0xa5, 0xed, 0xd2, 0xb3,
+	0xfa, 0xf3, 0x87, 0x3b, 0x49, 0xc7, 0x7d, 0x5f, 0x64, 0xc0, 0xc3, 0x7b, 0x56, 0x85, 0xa3, 0x80,
+	0xfc, 0x04, 0xaa, 0x2c, 0x10, 0x43, 0xb4, 0x59, 0x40, 0x9b, 0x47, 0xa9, 0x4d, 0x2f, 0x10, 0xc3,
+	0x82, 0x91, 0xc1, 0x94, 0x84, 0xec, 0x41, 0x13, 0xad, 0x78, 0x48, 0x23, 0x34, 0x5d, 0x44, 0xd3,
+	0xcd, 0x82, 0x69, 0x3f, 0xa4, 0x51, 0xc1, 0xbc, 0xce, 0x32, 0x29, 0xf9, 0x63, 0x68, 0xf0, 0x60,
+	0x62, 0x7b, 0x2c, 0x76, 0x91, 0xa1, 0x8c, 0x0c, 0x1b, 0xd9, 0x80, 0x83, 0x49, 0x87, 0xc5, 0x6e,
+	0x81, 0x00, 0x78, 0x2a, 0x44, 0x5f, 0x83, 0x09, 0x9a, 0x2e, 0x4d, 0xfb, 0x1a, 0x4c, 0xa6, 0x7c,
+	0x45, 0x81, 0xf4, 0x95, 0x8f, 0x5d, 0x86, 0x36, 0x95, 0x29, 0x5f, 0xfb, 0x63, 0x97, 0x15, 0x7d,
+	0xe5, 0x4a, 0x42, 0x7e, 0x02, 0x46, 0x78, 0xae, 0x26, 0xd5, 0x40, 0xa3, 0xf5, 0xd4, 0xe8, 0xd8,
+	0x71, 0xcf, 0xe9, 0xd4, 0xbc, 0x86, 0xe7, 0x38, 0xaf, 0x7f, 0x00, 0x10, 0xf2, 0x48, 0xd8, 0xb1,
+	0x70, 0x44, 0xbc, 0x56, 0x9d, 0xea, 0xed, 0x98, 0x47, 0xe2, 0x44, 0x2e, 0x76, 0x2c, 0x98, 0x1b,
+	0x1f, 0xde, 0xb3, 0x6a, 0xa1, 0x96, 0xc4, 0xd2, 0x72, 0xe8, 0xf3, 0x4b, 0x6d, 0x59, 0x9b, 0xb2,
+	0x7c, 0xe1, 0xf3, 0xcb, 0xa2, 0xe5, 0x50, 0x4b, 0x62, 0xf2, 0xfb, 0x50, 0x73, 0x7c, 0x27, 0x1a,
+	0xe3, 0x58, 0x01, 0x0d, 0xd7, 0x52, 0xc3, 0x3d, 0xa9, 0x29, 0x0c, 0xb5, 0xea, 0x68, 0xd1, 0x7e,
+	0x05, 0xca, 0x9e, 0x23, 0x1c, 0xf3, 0x5f, 0x00, 0x96, 0xa7, 0x70, 0x72, 0x9e, 0x7d, 0x1e, 0xcf,
+	0x8d, 0xa9, 0x57, 0x3c, 0x2e, 0xfa, 0xee, 0xa3, 0x80, 0x74, 0xa0, 0xe5, 0x5d, 0xb1, 0x60, 0x64,
+	0x8f, 0x9c, 0x38, 0xcc, 0x45, 0xd6, 0x56, 0x6a, 0xd9, 0x91, 0xea, 0x97, 0x4e, 0x1c, 0x16, 0xec,
+	0x1b, 0x5e, 0x4e, 0x2c, 0x63, 0x4c, 0x2e, 0x70, 0xe6, 0xd1, 0x74, 0x8c, 0xf5, 0x83, 0xc9, 0xac,
+	0x53, 0x75, 0x9e, 0x49, 0xc9, 0x5b, 0x78, 0x20, 0x29, 0x62, 0xe1, 0x44, 0x62, 0x12, 0xda, 0x43,
+	0x87, 0xf9, 0xb9, 0x58, 0x7b, 0x9a, 0x67, 0x3a, 0x51, 0x98, 0x17, 0x0e, 0xf3, 0x27, 0x11, 0x2d,
+	0x50, 0xde, 0xe7, 0x05, 0xb5, 0x24, 0xfe, 0x39, 0x3c, 0x44, 0x62, 0x36, 0x0a, 0x1c, 0xdf, 0xf6,
+	0xe8, 0x28, 0x72, 0x3c, 0x9a, 0x8b, 0xc5, 0xdf, 0x2a, 0x50, 0x23, 0xaa, 0xa3, 0x40, 0x05, 0xe6,
+	0x15, 0x3e, 0xab, 0x25, 0x7f, 0x09, 0x8f, 0x70, 0x63, 0x44, 0x6c, 0x28, 0x6c, 0x3e, 0xb4, 0x2f,
+	0x59, 0xe0, 0xf1, 0xcb, 0x5c, 0xd0, 0x16, 0xc8, 0x3b, 0x12, 0xd6, 0x1f, 0xbe, 0x45, 0xd0, 0x0c,
+	0xf9, 0xb4, 0x96, 0x0c, 0x40, 0x7a, 0x63, 0xfb, 0x3c, 0x8e, 0xed, 0x74, 0x2f, 0xa8, 0xb0, 0xfe,
+	0x38, 0x4f, 0xfb, 0x8a, 0xc7, 0x71, 0x7f, 0x28, 0x37, 0xc5, 0xc1, 0x99, 0x13, 0x04, 0xd4, 0x2f,
+	0x50, 0xb7, 0xb8, 0x46, 0xe8, 0x2d, 0x92, 0xcc, 0x33, 0xba, 0x12, 0x67, 0xf3, 0x5c, 0x9d, 0x33,
+	0xcf, 0x0a, 0x73, 0xed, 0x3c, 0x67, 0x6a, 0x49, 0xdc, 0x57, 0x49, 0x42, 0xb0, 0x4b, 0x35, 0x52,
+	0xb5, 0x1b, 0x7e, 0x27, 0x4f, 0x38, 0x88, 0x9c, 0x20, 0x1e, 0xb3, 0x38, 0x66, 0x3c, 0xe8, 0x05,
+	0x82, 0x46, 0x43, 0x1a, 0xd1, 0xc0, 0xa5, 0x6f, 0x9d, 0x28, 0x60, 0xc1, 0x48, 0x67, 0x8d, 0x01,
+	0xbb, 0xc4, 0x91, 0xfe, 0x42, 0x4d, 0xae, 0xe3, 0x0a, 0x76, 0x81, 0xfd, 0x66, 0x83, 0x85, 0xd9,
+	0x59, 0xd8, 0x4b, 0x61, 0xf3, 0xc6, 0x2b, 0x7d, 0x2e, 0x22, 0x54, 0x0f, 0x6b, 0xb2, 0x87, 0x30,
+	0xe2, 0x2e, 0x8d, 0x63, 0xb9, 0x0b, 0x68, 0x14, 0x71, 0x95, 0x25, 0xeb, 0xd8, 0xc5, 0x6f, 0xe7,
+	0xbb, 0x38, 0x4e, 0x71, 0x5d, 0x09, 0x2b, 0x74, 0xb0, 0xca, 0xe7, 0xe9, 0x09, 0x85, 0xf5, 0x6c,
+	0x0d, 0x87, 0x76, 0x7c, 0x15, 0xb8, 0x99, 0x17, 0x0d, 0xec, 0xe2, 0x93, 0xd9, 0xb5, 0xfc, 0x8a,
+	0x5e, 0x9d, 0x5c, 0x05, 0xee, 0x75, 0x8e, 0x28, 0x50, 0x82, 0x90, 0xdd, 0xbc, 0x81, 0x55, 0x4c,
+	0xb0, 0x62, 0x62, 0x87, 0x3c, 0x50, 0xe9, 0x08, 0xbb, 0x68, 0x62, 0x17, 0x1f, 0x15, 0xd2, 0xad,
+	0x98, 0x1c, 0xf3, 0x00, 0xb3, 0xd0, 0xcc, 0x92, 0x16, 0x75, 0xc4, 0x87, 0x2d, 0x0c, 0x6f, 0x3a,
+	0xb5, 0x06, 0x93, 0x48, 0x6d, 0xa0, 0x16, 0xb2, 0xff, 0xa8, 0x10, 0xe3, 0x39, 0xec, 0xbc, 0xf1,
+	0xcb, 0xe9, 0x98, 0x8f, 0x21, 0x6f, 0x95, 0x13, 0x11, 0x1d, 0x73, 0x41, 0x6d, 0x8f, 0x0e, 0xa9,
+	0xab, 0x52, 0xf9, 0x32, 0x76, 0x63, 0xe6, 0xbb, 0xb1, 0x10, 0xd4, 0x41, 0x4c, 0x81, 0x9f, 0xf0,
+	0x19, 0x65, 0x9a, 0x32, 0x77, 0xa0, 0x59, 0x38, 0x5a, 0xc9, 0x0f, 0x00, 0xf0, 0x54, 0x94, 0xf3,
+	0x45, 0x31, 0x65, 0xd6, 0xac, 0x9a, 0x94, 0xc8, 0x19, 0xa0, 0xe6, 0x21, 0xb4, 0x8a, 0xc7, 0x2a,
+	0x79, 0x04, 0x86, 0x3a, 0x81, 0x55, 0x82, 0x35, 0xac, 0x0a, 0x9e, 0xb2, 0xde, 0x14, 0xd3, 0xc2,
+	0x34, 0xd3, 0x19, 0xdc, 0x9f, 0x39, 0x23, 0xaf, 0x27, 0xfb, 0x02, 0x9a, 0x31, 0x8d, 0x98, 0xe3,
+	0xdb, 0xc1, 0x64, 0x7c, 0x4a, 0x23, 0x9d, 0x92, 0x57, 0xd3, 0x09, 0x38, 0x41, 0xed, 0x6b, 0x54,
+	0x5a, 0x8d, 0x38, 0xd7, 0x32, 0xbf, 0x2b, 0x41, 0xb3, 0x70, 0xa6, 0x5e, 0xdf, 0xcd, 0x2a, 0x54,
+	0x30, 0x68, 0x54, 0xca, 0x37, 0xac, 0x25, 0x19, 0x00, 0xd3, 0xae, 0x2c, 0x4e, 0xb9, 0x42, 0x9e,
+	0x40, 0xdd, 0xf1, 0xc6, 0x2c, 0xd0, 0xfa, 0x25, 0xd4, 0x03, 0x8a, 0x14, 0x60, 0x66, 0xf4, 0xe5,
+	0x0f, 0x1f, 0xfd, 0x2f, 0x80, 0xcc, 0x56, 0x23, 0x84, 0x40, 0x59, 0x5c, 0x85, 0xc9, 0x02, 0xe1,
+	0xff, 0x79, 0xaf, 0x16, 0x6e, 0x58, 0x89, 0xe9, 0xe1, 0x9b, 0x16, 0xb4, 0x8a, 0xe5, 0xc3, 0xad,
+	0xe7, 0xa7, 0x0d, 0x8b, 0xe1, 0xb9, 0x40, 0xe6, 0x86, 0x25, 0xff, 0x35, 0xff, 0xb5, 0x04, 0xed,
+	0xe9, 0xf2, 0x82, 0x6c, 0x42, 0x0d, 0x69, 0x71, 0xe4, 0x6a, 0x96, 0xb0, 0x7a, 0x1b, 0x4c, 0x8d,
+	0x7e, 0x26, 0x8e, 0x46, 0x74, 0x8c, 0xd5, 0x48, 0xda, 0x6f, 0x4d, 0x4b, 0x7a, 0x9e, 0xb4, 0xc3,
+	0x7a, 0x83, 0xa9, 0x13, 0xd6, 0xb0, 0x2a, 0xb2, 0xa9, 0x14, 0x68, 0x14, 0x70, 0x3c, 0x78, 0x0c,
+	0xab, 0x22, 0x9b, 0xaf, 0x39, 0x79, 0x08, 0x15, 0x97, 0xf3, 0x73, 0x46, 0xf1, 0xe4, 0xa8, 0x58,
+	0xba, 0x95, 0x78, 0x51, 0xce, 0xbc, 0x78, 0x0a, 0x35, 0x95, 0x93, 0x1d, 0xf7, 0xfa, 0x01, 0x9a,
+	0x3f, 0x83, 0xda, 0x21, 0x75, 0x22, 0x71, 0x4a, 0x1d, 0x41, 0x76, 0x61, 0xe5, 0x2c, 0x69, 0xa8,
+	0x13, 0x45, 0x4c, 0x22, 0xaa, 0x2d, 0x48, 0xaa, 0x3a, 0x49, 0x34, 0xe6, 0x5f, 0x97, 0x60, 0xb1,
+	0x1f, 0x4c, 0x6e, 0x3d, 0xe7, 0x33, 0x31, 0xb5, 0xf8, 0xc1, 0x31, 0x85, 0x9e, 0x32, 0x15, 0x85,
+	0x86, 0x25, 0xff, 0x35, 0xbf, 0x02, 0x43, 0xc6, 0xc0, 0x51, 0x3c, 0xba, 0x83, 0xc5, 0xff, 0x55,
+	0x09, 0x6a, 0xf2, 0x6c, 0xc0, 0xf5, 0xbf, 0x35, 0x5f, 0x6e, 0xdd, 0xca, 0x85, 0x75, 0x2b, 0x06,
+	0xc2, 0xd2, 0x74, 0x20, 0xcc, 0x8e, 0xe3, 0xa7, 0xd0, 0x78, 0x13, 0xfa, 0x2c, 0x38, 0x7f, 0xdf,
+	0x48, 0xb4, 0xe9, 0x42, 0x66, 0xfa, 0x77, 0x35, 0x80, 0x0e, 0xbd, 0x60, 0x2e, 0xed, 0x05, 0x43,
+	0x0c, 0x99, 0x0b, 0x1a, 0x78, 0x3c, 0xd2, 0x1b, 0x4e, 0xb7, 0xc8, 0x03, 0x58, 0x1a, 0x73, 0x8f,
+	0xfa, 0x3a, 0xbd, 0xa9, 0x06, 0xf9, 0x11, 0xb4, 0xcf, 0x9c, 0xc8, 0xbb, 0x74, 0x22, 0x6a, 0x5f,
+	0xd0, 0x48, 0x1e, 0xed, 0x7a, 0xd7, 0x2d, 0x27, 0xf2, 0xaf, 0x95, 0x58, 0x42, 0x87, 0x2c, 0x1a,
+	0x17, 0xa0, 0x65, 0x05, 0x4d, 0xe4, 0x09, 0x74, 0x13, 0x6a, 0x1e, 0x8e, 0x48, 0x8e, 0xbf, 0xad,
+	0x76, 0x8f, 0x12, 0xf4, 0x3c, 0xf2, 0x29, 0x3c, 0xd0, 0xca, 0x62, 0x50, 0xdc, 0x47, 0x1c, 0x51,
+	0xba, 0x7c, 0x44, 0x48, 0x3a, 0x79, 0x2e, 0xca, 0xc9, 0x8b, 0xf1, 0xd8, 0x35, 0xac, 0x6a, 0xc8,
+	0x03, 0x59, 0xda, 0xc7, 0xe4, 0x31, 0x80, 0xfc, 0x5c, 0x0b, 0xb8, 0xcf, 0x47, 0x57, 0x49, 0x42,
+	0xcb, 0x24, 0x64, 0x5b, 0x15, 0x36, 0xcc, 0x53, 0xc5, 0xa9, 0xde, 0x60, 0x80, 0x0b, 0x88, 0xb5,
+	0x26, 0xd9, 0x02, 0xd0, 0x08, 0xaa, 0x4b, 0x34, 0xc3, 0xaa, 0xa2, 0xbe, 0x1b, 0x78, 0xe4, 0x29,
+	0xb4, 0x1c, 0xdf, 0xe7, 0x6e, 0xc6, 0x50, 0x45, 0x44, 0x03, 0xa5, 0x09, 0xc7, 0x36, 0x34, 0x52,
+	0x14, 0xd5, 0xe5, 0x93, 0x61, 0x81, 0xc6, 0x48, 0x9e, 0x67, 0xd0, 0xce, 0x42, 0x42, 0x33, 0x01,
+	0xa2, 0x5a, 0x69, 0x60, 0x28, 0xae, 0xa7, 0xd0, 0xca, 0x21, 0xa9, 0xae, 0x66, 0x0c, 0xab, 0x91,
+	0xe2, 0x24, 0x9f, 0x09, 0x4d, 0x9d, 0x4c, 0x34, 0x59, 0x13, 0x41, 0x75, 0x95, 0x52, 0x14, 0xd3,
+	0x63, 0xa8, 0x27, 0x18, 0xaa, 0x0f, 0x7c, 0x43, 0x7d, 0xc6, 0x28, 0x8e, 0x2f, 0xa1, 0x12, 0x39,
+	0xc1, 0x88, 0xc6, 0x6b, 0xcb, 0xdb, 0x8b, 0xcf, 0xea, 0xcf, 0x9f, 0x65, 0x9f, 0x0d, 0x69, 0x40,
+	0xe9, 0x7f, 0x2d, 0x1a, 0xf3, 0x49, 0xe4, 0x52, 0x0b, 0xf1, 0x96, 0xb6, 0xdb, 0xf8, 0xfb, 0x32,
+	0x3c, 0x98, 0x07, 0x20, 0xeb, 0xc9, 0xd7, 0xae, 0x17, 0xaf, 0x95, 0xb6, 0x17, 0x9f, 0x19, 0xfa,
+	0x93, 0xd6, 0x9b, 0x5e, 0xb1, 0x85, 0x99, 0x15, 0x3b, 0x80, 0xa5, 0x90, 0x73, 0x3f, 0x5e, 0x5b,
+	0xc4, 0x41, 0xfd, 0xf8, 0x43, 0x07, 0xb5, 0x73, 0xcc, 0xb9, 0x6f, 0x29, 0xdb, 0x8d, 0xff, 0x5e,
+	0x80, 0xb2, 0x6c, 0x93, 0x3f, 0xcd, 0x1d, 0x3f, 0xad, 0xe7, 0xbf, 0x77, 0x2b, 0x32, 0xfc, 0x23,
+	0x53, 0xbe, 0x3e, 0xb6, 0x4e, 0xc0, 0x88, 0xcf, 0x9c, 0x88, 0x05, 0x23, 0x1c, 0x76, 0xeb, 0xf9,
+	0x4f, 0x6f, 0x47, 0x77, 0xa2, 0x8c, 0x91, 0x31, 0x61, 0x92, 0x1b, 0x53, 0x2d, 0xa0, 0x3a, 0x13,
+	0x54, 0x43, 0xee, 0x73, 0xaa, 0xbf, 0x9f, 0x0c, 0x4b, 0xfe, 0x6b, 0xee, 0x41, 0x35, 0x19, 0x0e,
+	0x01, 0xa8, 0xf4, 0x5f, 0xbf, 0xb1, 0x7b, 0x9d, 0xf6, 0x3d, 0xd2, 0x80, 0xea, 0xde, 0xab, 0x57,
+	0xfd, 0x03, 0xd9, 0x2a, 0x91, 0x16, 0xc0, 0xcb, 0xee, 0xd1, 0x71, 0xdf, 0x1a, 0xc8, 0xf6, 0x02,
+	0xa9, 0x83, 0xf1, 0xe2, 0x55, 0xff, 0xad, 0x6c, 0x2c, 0x9a, 0x67, 0x50, 0xcf, 0x0d, 0x81, 0x3c,
+	0x04, 0xd2, 0xe9, 0x76, 0x7a, 0x07, 0x7b, 0x83, 0x6e, 0xc7, 0x3e, 0xee, 0x5a, 0x76, 0xef, 0xf5,
+	0xe0, 0x45, 0xfb, 0x1e, 0x79, 0x02, 0x9b, 0x27, 0x87, 0x7b, 0x56, 0xb7, 0x63, 0xef, 0xff, 0x85,
+	0xbd, 0xf7, 0xea, 0x15, 0xca, 0xf1, 0x9f, 0x41, 0xf7, 0xe0, 0xb0, 0x5d, 0x22, 0xdb, 0xb0, 0x35,
+	0x07, 0x70, 0xb2, 0x77, 0xd4, 0x55, 0x88, 0x05, 0xf3, 0x6f, 0x16, 0x01, 0x0e, 0x7c, 0x27, 0x8e,
+	0xd9, 0x90, 0xd1, 0x08, 0xf3, 0xa7, 0x2d, 0xc2, 0x34, 0x9b, 0x2d, 0xf1, 0x41, 0xc8, 0x3c, 0xb2,
+	0x02, 0x4b, 0xdc, 0xbe, 0x48, 0xb3, 0x6a, 0x99, 0x7f, 0xcd, 0x30, 0xd7, 0x32, 0x85, 0xd5, 0x13,
+	0xc2, 0x12, 0x2c, 0x43, 0xac, 0x9a, 0x92, 0x32, 0x93, 0xd8, 0x47, 0x60, 0x70, 0x3b, 0x3c, 0x65,
+	0x22, 0xd6, 0x49, 0xb6, 0xc2, 0x8f, 0x65, 0x0b, 0xf3, 0xa7, 0x56, 0xe8, 0x13, 0x95, 0x29, 0xc5,
+	0x3a, 0x54, 0xa9, 0x38, 0x53, 0xe7, 0xba, 0xda, 0xea, 0x06, 0x15, 0x67, 0xc9, 0xb1, 0xee, 0xc5,
+	0xc2, 0x1e, 0x3b, 0x2e, 0x6e, 0xf1, 0x86, 0x55, 0xf1, 0x62, 0x71, 0xe4, 0xb8, 0x52, 0x11, 0x47,
+	0x2e, 0x2a, 0x6a, 0x4a, 0x11, 0x47, 0xae, 0x54, 0xc8, 0x20, 0x0f, 0xd5, 0xa5, 0x91, 0xde, 0xcb,
+	0x06, 0x0b, 0x8f, 0xf1, 0xe2, 0x69, 0x15, 0xa4, 0xb5, 0xcd, 0x42, 0xbd, 0x79, 0x97, 0xbc, 0x58,
+	0xf4, 0x42, 0x29, 0x96, 0x54, 0x2c, 0xd4, 0x79, 0x6c, 0x29, 0x8e, 0xdc, 0x5e, 0x28, 0x89, 0xa4,
+	0x58, 0xee, 0x6e, 0xbd, 0x8f, 0x65, 0x8f, 0x32, 0xc1, 0x49, 0x95, 0x24, 0x42, 0x95, 0xda, 0xc0,
+	0x72, 0x94, 0xa8, 0xda, 0x86, 0x46, 0x78, 0x2e, 0x6c, 0xe1, 0x8c, 0x94, 0x3f, 0xcb, 0x6a, 0x2b,
+	0x85, 0xe7, 0x62, 0xe0, 0xe0, 0x0a, 0x9b, 0xbf, 0x5a, 0x84, 0x9a, 0xfc, 0x70, 0xe2, 0xc1, 0xc1,
+	0x18, 0x53, 0x86, 0xe3, 0x79, 0x36, 0x9f, 0x08, 0x1a, 0x49, 0x2b, 0x5c, 0x8c, 0xaa, 0x55, 0x77,
+	0x3c, 0xaf, 0x2f, 0x65, 0x03, 0x67, 0x24, 0xd3, 0x94, 0x2c, 0xe1, 0x2f, 0x68, 0x0e, 0xb6, 0x80,
+	0xb0, 0x96, 0x92, 0xa7, 0xc8, 0x6d, 0x68, 0x88, 0xc8, 0x09, 0x6d, 0xc1, 0xed, 0x33, 0x1e, 0xab,
+	0xf0, 0xad, 0x5a, 0x20, 0x65, 0x03, 0x7e, 0xc8, 0x63, 0x41, 0x7e, 0x17, 0x48, 0x44, 0xc7, 0x4e,
+	0x74, 0xae, 0xb9, 0xd4, 0x7a, 0x94, 0x11, 0xd7, 0x56, 0x1a, 0x64, 0x53, 0x2b, 0x93, 0xa1, 0x59,
+	0x10, 0xa4, 0xe8, 0xa5, 0x3c, 0xba, 0x27, 0x15, 0x0a, 0xad, 0x7d, 0x51, 0x50, 0x39, 0xc8, 0x4a,
+	0xea, 0x0b, 0xa2, 0x8a, 0xbe, 0x64, 0x30, 0x23, 0xef, 0x4b, 0x8a, 0xdc, 0x81, 0x15, 0x21, 0xbf,
+	0x6e, 0x7d, 0x47, 0xe4, 0xc1, 0x55, 0x04, 0xdf, 0x4f, 0x55, 0xf3, 0xf1, 0xd9, 0x44, 0xd5, 0xa6,
+	0xf0, 0xc9, 0x5c, 0x99, 0xbf, 0x29, 0x41, 0x45, 0xad, 0x03, 0x79, 0x0a, 0x8b, 0xee, 0x38, 0xb9,
+	0xe1, 0x21, 0xd9, 0xa5, 0x51, 0xb2, 0x4a, 0x96, 0x54, 0xcf, 0xdf, 0x19, 0xb9, 0x68, 0x5f, 0x2c,
+	0x44, 0x7b, 0xb6, 0xbd, 0xca, 0x53, 0xdb, 0x4b, 0x6d, 0x99, 0xa5, 0xe2, 0x96, 0x99, 0xbf, 0x33,
+	0xb2, 0x7d, 0x67, 0xe4, 0xf6, 0x9d, 0xf9, 0x9b, 0x45, 0x28, 0xbf, 0xf0, 0xf9, 0x25, 0x1e, 0x84,
+	0xae, 0xfc, 0x42, 0xb6, 0xf3, 0x95, 0xc9, 0xb2, 0xd5, 0x50, 0xd2, 0xde, 0xbc, 0x4a, 0x69, 0x39,
+	0xa9, 0x94, 0x56, 0xa1, 0x32, 0x09, 0x98, 0x14, 0xd7, 0x95, 0x78, 0x12, 0xb0, 0x9b, 0x2a, 0xe2,
+	0x4d, 0xc0, 0x63, 0x4a, 0xc5, 0xb5, 0xaa, 0x32, 0xaa, 0x52, 0x80, 0x1b, 0x75, 0x1d, 0xaa, 0xc9,
+	0x61, 0x8b, 0xdb, 0x6e, 0xd9, 0x32, 0xf4, 0x41, 0x4b, 0x7e, 0x08, 0xcb, 0x01, 0x15, 0x97, 0x1c,
+	0xa3, 0x48, 0x8d, 0x72, 0x09, 0x11, 0x4d, 0x2d, 0xee, 0xcd, 0xab, 0xd4, 0x2b, 0x08, 0xc9, 0x15,
+	0x68, 0x9f, 0x03, 0xb8, 0x69, 0xf6, 0xd2, 0xb7, 0x36, 0x2b, 0xe9, 0x5a, 0x65, 0x89, 0xcd, 0xca,
+	0xc1, 0xc8, 0xc7, 0x50, 0x71, 0x70, 0x15, 0xf5, 0x6d, 0xcc, 0xf2, 0xd4, 0xe2, 0x5a, 0x5a, 0x4d,
+	0x36, 0xa0, 0x1a, 0x46, 0x8c, 0x47, 0x4c, 0x5c, 0x61, 0xc8, 0x2c, 0x5b, 0x69, 0x3b, 0x57, 0xf1,
+	0x37, 0x0a, 0x15, 0x7f, 0xae, 0xd4, 0x6c, 0x16, 0x4a, 0xcd, 0x75, 0xa8, 0x8e, 0x22, 0x3e, 0x09,
+	0xa5, 0x1f, 0x3a, 0x3f, 0x60, 0xbb, 0xe7, 0x99, 0x03, 0x68, 0x4c, 0xd7, 0x51, 0xaa, 0x18, 0x4c,
+	0x16, 0xaf, 0x61, 0x55, 0x95, 0xa0, 0xe7, 0x91, 0x8f, 0x61, 0x59, 0x2b, 0xe3, 0x90, 0xba, 0x6c,
+	0xc8, 0x5c, 0x5d, 0x64, 0xb6, 0x94, 0xf8, 0x44, 0x4b, 0xcd, 0x7f, 0x2f, 0x43, 0xab, 0x78, 0xab,
+	0x7a, 0x7d, 0xb5, 0xba, 0x0e, 0xd5, 0xe8, 0x9d, 0x7d, 0x7a, 0x25, 0x68, 0x8c, 0x6c, 0x15, 0xcb,
+	0x88, 0xde, 0xed, 0xcb, 0xa6, 0x5c, 0x81, 0xe8, 0x9d, 0x1d, 0x62, 0xb9, 0xab, 0xe2, 0xb9, 0x62,
+	0xd5, 0xa2, 0x77, 0xaa, 0xfe, 0x8d, 0x71, 0xef, 0xbe, 0xb3, 0x27, 0xae, 0x23, 0x73, 0x9f, 0x06,
+	0x95, 0x11, 0xd4, 0x8a, 0xde, 0xbd, 0x91, 0xe2, 0x22, 0x72, 0x5c, 0x40, 0x2e, 0x25, 0xc8, 0xa3,
+	0x59, 0xe4, 0x69, 0x01, 0x59, 0x49, 0x90, 0xfb, 0xb3, 0x48, 0x75, 0x9d, 0x94, 0x20, 0x8d, 0x04,
+	0x89, 0xd7, 0x43, 0x09, 0x72, 0x1d, 0xaa, 0x22, 0xf1, 0xb0, 0xaa, 0x3c, 0x14, 0x99, 0x87, 0x22,
+	0xf3, 0xb0, 0xa6, 0x3c, 0x14, 0x79, 0x0f, 0xc5, 0xb4, 0x87, 0xa0, 0xfa, 0x10, 0x33, 0x1e, 0x8a,
+	0x69, 0x0f, 0xeb, 0x09, 0xf2, 0x68, 0x16, 0x59, 0xf4, 0xb0, 0x91, 0x20, 0xf7, 0x67, 0x91, 0x45,
+	0x0f, 0x9b, 0x09, 0xb2, 0xe0, 0xa1, 0x09, 0xcd, 0xe8, 0x9d, 0xed, 0x46, 0xae, 0x42, 0xc7, 0x18,
+	0x65, 0x15, 0xab, 0x1e, 0xbd, 0x3b, 0x88, 0x5c, 0x44, 0xa2, 0xab, 0xa7, 0x2c, 0x4c, 0x00, 0xcb,
+	0xca, 0xd5, 0x53, 0x16, 0x6a, 0xf5, 0x16, 0xd4, 0x04, 0x1b, 0xd3, 0x58, 0x38, 0xe3, 0x10, 0xbf,
+	0x07, 0x0c, 0x2b, 0x13, 0x98, 0xdf, 0x95, 0xa0, 0x55, 0xbc, 0x6c, 0xcf, 0xe7, 0x85, 0x52, 0x21,
+	0x2f, 0x7c, 0xff, 0x80, 0xfa, 0xfe, 0x0b, 0x75, 0xf3, 0xe8, 0xbf, 0x84, 0x66, 0xe1, 0x76, 0xfe,
+	0xfa, 0xcd, 0xf0, 0x10, 0x2a, 0xb1, 0x70, 0xc4, 0x24, 0xd6, 0x35, 0xaf, 0x6e, 0x99, 0xdf, 0xc0,
+	0xca, 0x9c, 0x5b, 0xfa, 0x5b, 0x7f, 0x8c, 0x66, 0xf4, 0x8b, 0x05, 0xfa, 0x7f, 0x5e, 0x00, 0x32,
+	0x7b, 0x81, 0xff, 0x7d, 0x2e, 0x96, 0x7c, 0x1e, 0xdb, 0x85, 0x2e, 0x6a, 0x3e, 0x8f, 0x4f, 0x50,
+	0xa0, 0xd4, 0xa7, 0x89, 0xba, 0x9c, 0xa8, 0x4f, 0xb5, 0xfa, 0x19, 0xb4, 0x7d, 0x1e, 0xba, 0xf6,
+	0x98, 0xc5, 0x29, 0x87, 0xfa, 0x56, 0x6b, 0x49, 0xf9, 0x11, 0x8b, 0x13, 0xa2, 0xcf, 0x60, 0x55,
+	0x23, 0x75, 0xc0, 0x25, 0xf0, 0x8a, 0xfa, 0x3e, 0x54, 0x70, 0x15, 0x78, 0xda, 0xe4, 0x09, 0xd4,
+	0x7d, 0x3e, 0x64, 0x09, 0xd0, 0x50, 0x65, 0x90, 0x14, 0x69, 0xc0, 0x47, 0xd0, 0xf0, 0xb9, 0x33,
+	0x4e, 0x11, 0x55, 0x44, 0xd4, 0x51, 0xa6, 0x20, 0x26, 0x85, 0xcd, 0x1b, 0xde, 0x26, 0xee, 0x6c,
+	0x31, 0xfe, 0xa1, 0x04, 0x1b, 0xd7, 0x3f, 0x54, 0xdc, 0x55, 0x37, 0xe4, 0x73, 0x78, 0xc8, 0x02,
+	0xf9, 0x91, 0x4e, 0xed, 0x53, 0x26, 0xf4, 0x3c, 0x46, 0x8e, 0xa0, 0xba, 0x40, 0x58, 0xd1, 0xda,
+	0x7d, 0x26, 0x70, 0x22, 0x2d, 0x47, 0x50, 0xf3, 0xd7, 0x6a, 0x6c, 0xd7, 0xbc, 0x73, 0xdc, 0xd9,
+	0xd8, 0x1e, 0xc0, 0x12, 0xbe, 0xb8, 0x24, 0xb5, 0x0a, 0x36, 0x24, 0x7b, 0x40, 0x2f, 0x6d, 0xfa,
+	0x6d, 0x52, 0xad, 0x54, 0x02, 0x7a, 0xd9, 0xfd, 0xd6, 0x33, 0xcf, 0xe0, 0xf1, 0xcd, 0xaf, 0x24,
+	0x77, 0xb6, 0x36, 0xff, 0x58, 0x52, 0x31, 0x70, 0xcd, 0xbb, 0xc9, 0xff, 0xef, 0xe2, 0xfc, 0xb2,
+	0x04, 0xe6, 0xfb, 0xdf, 0x60, 0xfe, 0x6f, 0x17, 0xc9, 0xfc, 0x16, 0xd7, 0xe2, 0x86, 0xb7, 0x9a,
+	0x5b, 0xf7, 0xff, 0x04, 0xea, 0xf8, 0xa0, 0x12, 0x51, 0x27, 0xd6, 0x57, 0x4f, 0x86, 0x05, 0x52,
+	0x64, 0xa1, 0xc4, 0x3c, 0x87, 0x8f, 0xde, 0xfb, 0xb0, 0x72, 0x67, 0x11, 0xc0, 0x60, 0xed, 0xba,
+	0x27, 0x96, 0xef, 0x93, 0x2f, 0x23, 0x8f, 0x25, 0x47, 0xa2, 0x72, 0xac, 0x16, 0x79, 0x4c, 0x1d,
+	0x89, 0xe6, 0x6b, 0xd8, 0xba, 0xe9, 0x4d, 0xea, 0xb6, 0xdd, 0x99, 0x31, 0x6c, 0xbf, 0xef, 0xfd,
+	0xe6, 0xee, 0x17, 0xe7, 0xaf, 0x60, 0xfd, 0xda, 0xd7, 0x9c, 0xbb, 0x9e, 0xb0, 0xff, 0x2a, 0x41,
+	0xfd, 0xa5, 0x2c, 0x6c, 0x8f, 0x28, 0x16, 0xb3, 0x1f, 0x41, 0x83, 0x25, 0x17, 0xde, 0x49, 0x1f,
+	0x4d, 0xfc, 0xe1, 0x83, 0x92, 0xf5, 0x3c, 0xd2, 0x83, 0x56, 0x06, 0xc1, 0x2f, 0x09, 0x75, 0x6b,
+	0x93, 0xbd, 0x45, 0xe5, 0x08, 0x77, 0xd2, 0xeb, 0x73, 0xbc, 0x9e, 0x69, 0xb2, 0x7c, 0x93, 0x3c,
+	0x86, 0xfa, 0x88, 0x8e, 0xed, 0xe4, 0x83, 0x61, 0x11, 0x3b, 0x93, 0x1f, 0x0c, 0xc7, 0xea, 0x83,
+	0x21, 0x5f, 0xd2, 0x97, 0x51, 0x99, 0xb6, 0xcd, 0x3f, 0x82, 0x66, 0x81, 0x9b, 0x18, 0xb0, 0x78,
+	0xdc, 0x7f, 0xdd, 0xbe, 0x47, 0xda, 0xd0, 0xe8, 0x1e, 0xf7, 0x5f, 0xdb, 0x9f, 0xbd, 0xb4, 0x8f,
+	0xf7, 0x06, 0x87, 0xed, 0x12, 0xb9, 0x0f, 0x4d, 0x25, 0xf9, 0x54, 0x8b, 0x16, 0xcc, 0xbf, 0x5d,
+	0x80, 0x25, 0x1c, 0x67, 0xa1, 0xd4, 0x57, 0xee, 0x26, 0xa5, 0x3e, 0xf9, 0x19, 0x18, 0x2e, 0x1f,
+	0x8f, 0x1d, 0xfd, 0x0b, 0x80, 0x19, 0x1f, 0xf3, 0x9e, 0xc6, 0x07, 0x0a, 0x69, 0x25, 0x26, 0x64,
+	0x07, 0x8c, 0xb1, 0x52, 0xe9, 0x3b, 0xb7, 0x07, 0xf3, 0x66, 0xc8, 0x4a, 0x40, 0xb9, 0x2f, 0x9d,
+	0xf2, 0x8d, 0x5f, 0x3a, 0xe6, 0x57, 0xb0, 0x32, 0xa7, 0x63, 0xb2, 0x0c, 0xf5, 0xbd, 0x4e, 0xc7,
+	0x3e, 0xea, 0x1e, 0xed, 0x77, 0xad, 0x93, 0xf6, 0x3d, 0x42, 0xa0, 0x65, 0x75, 0x8f, 0xfa, 0x5f,
+	0x77, 0x53, 0x59, 0x49, 0x82, 0x4e, 0xba, 0x83, 0x54, 0xb0, 0x60, 0x1a, 0xb0, 0xd4, 0x1d, 0x87,
+	0xe2, 0xea, 0xf9, 0x3f, 0xb5, 0xc0, 0xe8, 0xab, 0x0e, 0x49, 0x07, 0xa0, 0xc3, 0x62, 0xe7, 0xd4,
+	0xa7, 0x7d, 0x5f, 0x90, 0x56, 0x3a, 0x10, 0x44, 0x6e, 0x4c, 0xb5, 0xcd, 0x87, 0xbf, 0xfc, 0x8f,
+	0xff, 0xfc, 0xf5, 0x42, 0xdb, 0xac, 0xef, 0x5e, 0x7c, 0xb6, 0xab, 0xed, 0xbe, 0x28, 0x7d, 0x42,
+	0x5e, 0x40, 0xdd, 0xa2, 0x34, 0xf8, 0x50, 0x9a, 0x47, 0x48, 0x73, 0xdf, 0x6c, 0x48, 0x9a, 0xc4,
+	0x50, 0xf2, 0x74, 0xa1, 0xae, 0xb3, 0x23, 0xed, 0x07, 0x13, 0xd2, 0xc8, 0x3f, 0x7a, 0xce, 0xb0,
+	0xac, 0x21, 0x0b, 0x31, 0x9b, 0x92, 0xa5, 0xab, 0x3a, 0x0f, 0x26, 0x92, 0xe6, 0x10, 0x9a, 0xe9,
+	0x4e, 0xfe, 0x00, 0xa2, 0x75, 0x24, 0x5a, 0x31, 0x5b, 0x39, 0xaf, 0x34, 0xd3, 0x01, 0xd4, 0x3a,
+	0xd4, 0xa7, 0xb7, 0x1e, 0x4e, 0x6a, 0x24, 0x49, 0x7a, 0x00, 0xfa, 0xc5, 0xa5, 0x3f, 0x11, 0xa4,
+	0x5d, 0xf8, 0x25, 0xcf, 0x51, 0x3c, 0xba, 0x79, 0x3c, 0x99, 0xa5, 0xa4, 0xea, 0x43, 0x23, 0x7d,
+	0x6e, 0x91, 0x64, 0xa4, 0xf0, 0x42, 0x8f, 0xe2, 0x19, 0xba, 0x4d, 0xa4, 0x5b, 0x35, 0xdb, 0x48,
+	0x97, 0xb3, 0x96, 0x84, 0x7f, 0x0e, 0xcb, 0xf9, 0x87, 0x13, 0xc9, 0x99, 0xbd, 0x2b, 0xe5, 0x35,
+	0x33, 0xb4, 0x8f, 0x91, 0x76, 0xcd, 0x5c, 0x91, 0xb4, 0x53, 0x1c, 0x92, 0xf9, 0x4b, 0x30, 0xe4,
+	0x57, 0xc9, 0x9e, 0xe7, 0x91, 0x66, 0xe1, 0x47, 0x41, 0x37, 0x47, 0x95, 0xb6, 0x51, 0x51, 0x05,
+	0xb2, 0x65, 0xe1, 0x5d, 0xd3, 0xfb, 0x48, 0x0a, 0x93, 0x96, 0x99, 0x49, 0x9e, 0x13, 0x68, 0xa5,
+	0xaf, 0x76, 0x07, 0x67, 0xd4, 0x3d, 0x9f, 0x09, 0xd0, 0x6c, 0x1a, 0x53, 0xa0, 0xf9, 0x03, 0x24,
+	0x7c, 0x64, 0x12, 0x49, 0x58, 0xb4, 0x97, 0xa4, 0x47, 0x50, 0x57, 0x31, 0x77, 0xcc, 0x83, 0xde,
+	0x30, 0xb7, 0x10, 0x69, 0xae, 0x9a, 0x19, 0xe2, 0x06, 0x32, 0x3e, 0x30, 0x97, 0xb3, 0x80, 0x45,
+	0x63, 0xbd, 0xb0, 0x3a, 0xf2, 0x3e, 0x9c, 0xaf, 0xb0, 0xb0, 0x79, 0x6b, 0x49, 0x68, 0x41, 0xf3,
+	0x25, 0x15, 0xb9, 0x87, 0xad, 0x69, 0x9f, 0x57, 0xe6, 0xdc, 0xbd, 0x9b, 0x5b, 0x48, 0xf9, 0xd0,
+	0xbc, 0x2f, 0x29, 0x0b, 0xf6, 0x92, 0xf3, 0x4f, 0xa0, 0x62, 0xd1, 0x53, 0xce, 0xdf, 0xbf, 0xc3,
+	0x57, 0x91, 0x67, 0xd9, 0x04, 0xb5, 0xc3, 0xa5, 0x8d, 0x24, 0x78, 0x03, 0xf7, 0x0f, 0xb8, 0xef,
+	0x53, 0x37, 0x7f, 0xfb, 0xf1, 0x3e, 0xae, 0x6d, 0xe4, 0xda, 0x30, 0x57, 0x25, 0xd7, 0x8c, 0xb9,
+	0xa4, 0x8d, 0xe0, 0xd1, 0x41, 0x44, 0x1d, 0x41, 0x07, 0x91, 0x33, 0x1c, 0x32, 0xf7, 0xc4, 0x3d,
+	0xa3, 0xde, 0xc4, 0x97, 0xa9, 0xf6, 0xc9, 0x4e, 0xe1, 0xc7, 0x88, 0x33, 0x80, 0x99, 0xde, 0x7e,
+	0x88, 0xbd, 0x6d, 0x9b, 0x9b, 0xd8, 0xdb, 0x7c, 0x56, 0xdd, 0xa7, 0x8a, 0xb0, 0xbb, 0xee, 0xf3,
+	0x1a, 0x56, 0xd9, 0xe7, 0x10, 0x56, 0x0a, 0x23, 0xfa, 0xb3, 0x09, 0x9d, 0xd0, 0x98, 0x6c, 0xce,
+	0xed, 0x4f, 0x29, 0x67, 0xfa, 0x32, 0xb1, 0xaf, 0x2d, 0xf3, 0xd1, 0x8c, 0x7f, 0xca, 0x40, 0xf7,
+	0x53, 0x18, 0xc5, 0xff, 0xba, 0x9f, 0x39, 0x6c, 0xb2, 0x9f, 0x3f, 0x84, 0xb6, 0xda, 0x06, 0xb9,
+	0xb2, 0xe7, 0xfa, 0x30, 0xcd, 0x40, 0xe6, 0xbd, 0x4f, 0x4b, 0xe4, 0x1b, 0x58, 0x3d, 0xa6, 0xd1,
+	0x90, 0x47, 0x63, 0x3c, 0x22, 0xfb, 0x21, 0x8d, 0xa6, 0x19, 0x50, 0x31, 0x33, 0xb2, 0xa7, 0x38,
+	0xb2, 0xc7, 0xe6, 0xba, 0x1c, 0xd9, 0x5c, 0x8a, 0x2f, 0x4a, 0x9f, 0xec, 0x7f, 0x03, 0x9b, 0x3c,
+	0x1a, 0xa1, 0xa9, 0xcb, 0x23, 0x6f, 0x47, 0xfd, 0xc2, 0x35, 0xa1, 0xda, 0x6f, 0x7e, 0x8d, 0x6d,
+	0x79, 0x8c, 0xf6, 0x5f, 0x0d, 0x7e, 0xbe, 0x3b, 0x62, 0xe2, 0x6c, 0x72, 0xba, 0xe3, 0xf2, 0xf1,
+	0x6e, 0x62, 0xb2, 0xab, 0x4c, 0x7e, 0xac, 0x7f, 0x14, 0x7b, 0xf1, 0xf9, 0xee, 0x88, 0x27, 0x3f,
+	0xaf, 0x3d, 0x2e, 0x9d, 0x56, 0x50, 0xfe, 0xf9, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x5a,
+	0x82, 0xc1, 0x80, 0x2b, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// OpenoltClient is the client API for Openolt service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type OpenoltClient interface {
+	DisableOlt(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
+	ReenableOlt(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
+	ActivateOnu(ctx context.Context, in *Onu, opts ...grpc.CallOption) (*Empty, error)
+	DeactivateOnu(ctx context.Context, in *Onu, opts ...grpc.CallOption) (*Empty, error)
+	DeleteOnu(ctx context.Context, in *Onu, opts ...grpc.CallOption) (*Empty, error)
+	OmciMsgOut(ctx context.Context, in *OmciMsg, opts ...grpc.CallOption) (*Empty, error)
+	OnuPacketOut(ctx context.Context, in *OnuPacket, opts ...grpc.CallOption) (*Empty, error)
+	UplinkPacketOut(ctx context.Context, in *UplinkPacket, opts ...grpc.CallOption) (*Empty, error)
+	FlowAdd(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*Empty, error)
+	FlowRemove(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*Empty, error)
+	HeartbeatCheck(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Heartbeat, error)
+	EnablePonIf(ctx context.Context, in *Interface, opts ...grpc.CallOption) (*Empty, error)
+	DisablePonIf(ctx context.Context, in *Interface, opts ...grpc.CallOption) (*Empty, error)
+	GetDeviceInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*DeviceInfo, error)
+	Reboot(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
+	CollectStatistics(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error)
+	CreateTrafficSchedulers(ctx context.Context, in *tech_profile.TrafficSchedulers, opts ...grpc.CallOption) (*Empty, error)
+	RemoveTrafficSchedulers(ctx context.Context, in *tech_profile.TrafficSchedulers, opts ...grpc.CallOption) (*Empty, error)
+	CreateTrafficQueues(ctx context.Context, in *tech_profile.TrafficQueues, opts ...grpc.CallOption) (*Empty, error)
+	RemoveTrafficQueues(ctx context.Context, in *tech_profile.TrafficQueues, opts ...grpc.CallOption) (*Empty, error)
+	EnableIndication(ctx context.Context, in *Empty, opts ...grpc.CallOption) (Openolt_EnableIndicationClient, error)
+	PerformGroupOperation(ctx context.Context, in *Group, opts ...grpc.CallOption) (*Empty, error)
+}
+
+type openoltClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewOpenoltClient(cc *grpc.ClientConn) OpenoltClient {
+	return &openoltClient{cc}
+}
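+
+// A minimal usage sketch for this client, assuming a hypothetical agent
+// address; grpc.Dial, grpc.WithInsecure and context.Background are the
+// standard APIs from the vendored grpc and context packages, and error
+// handling here is illustrative only:
+//
+//	conn, err := grpc.Dial("olt-agent-address:port", grpc.WithInsecure(), grpc.WithBlock())
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer conn.Close()
+//	client := NewOpenoltClient(conn)
+//	// Unary RPC: liveness probe against the agent.
+//	if _, err := client.HeartbeatCheck(context.Background(), &Empty{}); err != nil {
+//		panic(err)
+//	}
+//	// Server-streaming RPC: Recv blocks until the agent sends the next indication.
+//	stream, err := client.EnableIndication(context.Background(), &Empty{})
+//	if err != nil {
+//		panic(err)
+//	}
+//	ind, err := stream.Recv()
+//	_, _ = ind, err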
+
+func (c *openoltClient) DisableOlt(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/DisableOlt", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) ReenableOlt(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/ReenableOlt", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) ActivateOnu(ctx context.Context, in *Onu, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/ActivateOnu", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) DeactivateOnu(ctx context.Context, in *Onu, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/DeactivateOnu", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) DeleteOnu(ctx context.Context, in *Onu, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/DeleteOnu", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) OmciMsgOut(ctx context.Context, in *OmciMsg, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/OmciMsgOut", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) OnuPacketOut(ctx context.Context, in *OnuPacket, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/OnuPacketOut", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) UplinkPacketOut(ctx context.Context, in *UplinkPacket, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/UplinkPacketOut", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) FlowAdd(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/FlowAdd", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) FlowRemove(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/FlowRemove", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) HeartbeatCheck(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Heartbeat, error) {
+	out := new(Heartbeat)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/HeartbeatCheck", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) EnablePonIf(ctx context.Context, in *Interface, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/EnablePonIf", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) DisablePonIf(ctx context.Context, in *Interface, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/DisablePonIf", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) GetDeviceInfo(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*DeviceInfo, error) {
+	out := new(DeviceInfo)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/GetDeviceInfo", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) Reboot(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/Reboot", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) CollectStatistics(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/CollectStatistics", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) CreateTrafficSchedulers(ctx context.Context, in *tech_profile.TrafficSchedulers, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/CreateTrafficSchedulers", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) RemoveTrafficSchedulers(ctx context.Context, in *tech_profile.TrafficSchedulers, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/RemoveTrafficSchedulers", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) CreateTrafficQueues(ctx context.Context, in *tech_profile.TrafficQueues, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/CreateTrafficQueues", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) RemoveTrafficQueues(ctx context.Context, in *tech_profile.TrafficQueues, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/RemoveTrafficQueues", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *openoltClient) EnableIndication(ctx context.Context, in *Empty, opts ...grpc.CallOption) (Openolt_EnableIndicationClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Openolt_serviceDesc.Streams[0], "/openolt.Openolt/EnableIndication", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &openoltEnableIndicationClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Openolt_EnableIndicationClient interface {
+	Recv() (*Indication, error)
+	grpc.ClientStream
+}
+
+type openoltEnableIndicationClient struct {
+	grpc.ClientStream
+}
+
+func (x *openoltEnableIndicationClient) Recv() (*Indication, error) {
+	m := new(Indication)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *openoltClient) PerformGroupOperation(ctx context.Context, in *Group, opts ...grpc.CallOption) (*Empty, error) {
+	out := new(Empty)
+	err := c.cc.Invoke(ctx, "/openolt.Openolt/PerformGroupOperation", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// OpenoltServer is the server API for Openolt service.
+type OpenoltServer interface {
+	DisableOlt(context.Context, *Empty) (*Empty, error)
+	ReenableOlt(context.Context, *Empty) (*Empty, error)
+	ActivateOnu(context.Context, *Onu) (*Empty, error)
+	DeactivateOnu(context.Context, *Onu) (*Empty, error)
+	DeleteOnu(context.Context, *Onu) (*Empty, error)
+	OmciMsgOut(context.Context, *OmciMsg) (*Empty, error)
+	OnuPacketOut(context.Context, *OnuPacket) (*Empty, error)
+	UplinkPacketOut(context.Context, *UplinkPacket) (*Empty, error)
+	FlowAdd(context.Context, *Flow) (*Empty, error)
+	FlowRemove(context.Context, *Flow) (*Empty, error)
+	HeartbeatCheck(context.Context, *Empty) (*Heartbeat, error)
+	EnablePonIf(context.Context, *Interface) (*Empty, error)
+	DisablePonIf(context.Context, *Interface) (*Empty, error)
+	GetDeviceInfo(context.Context, *Empty) (*DeviceInfo, error)
+	Reboot(context.Context, *Empty) (*Empty, error)
+	CollectStatistics(context.Context, *Empty) (*Empty, error)
+	CreateTrafficSchedulers(context.Context, *tech_profile.TrafficSchedulers) (*Empty, error)
+	RemoveTrafficSchedulers(context.Context, *tech_profile.TrafficSchedulers) (*Empty, error)
+	CreateTrafficQueues(context.Context, *tech_profile.TrafficQueues) (*Empty, error)
+	RemoveTrafficQueues(context.Context, *tech_profile.TrafficQueues) (*Empty, error)
+	EnableIndication(*Empty, Openolt_EnableIndicationServer) error
+	PerformGroupOperation(context.Context, *Group) (*Empty, error)
+}
+
+func RegisterOpenoltServer(s *grpc.Server, srv OpenoltServer) {
+	s.RegisterService(&_Openolt_serviceDesc, srv)
+}
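+
+// A minimal registration sketch; the listen address and the server type
+// implementing OpenoltServer are hypothetical, while grpc.NewServer,
+// net.Listen and (*grpc.Server).Serve are standard grpc/net APIs:
+//
+//	s := grpc.NewServer()
+//	RegisterOpenoltServer(s, &myOpenoltServer{}) // myOpenoltServer must implement OpenoltServer
+//	lis, err := net.Listen("tcp", ":<agent-port>")
+//	if err != nil {
+//		panic(err)
+//	}
+//	if err := s.Serve(lis); err != nil {
+//		panic(err)
+//	}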
+
+func _Openolt_DisableOlt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).DisableOlt(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/DisableOlt",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).DisableOlt(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_ReenableOlt_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).ReenableOlt(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/ReenableOlt",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).ReenableOlt(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_ActivateOnu_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Onu)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).ActivateOnu(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/ActivateOnu",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).ActivateOnu(ctx, req.(*Onu))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_DeactivateOnu_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Onu)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).DeactivateOnu(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/DeactivateOnu",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).DeactivateOnu(ctx, req.(*Onu))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_DeleteOnu_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Onu)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).DeleteOnu(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/DeleteOnu",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).DeleteOnu(ctx, req.(*Onu))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_OmciMsgOut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(OmciMsg)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).OmciMsgOut(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/OmciMsgOut",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).OmciMsgOut(ctx, req.(*OmciMsg))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_OnuPacketOut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(OnuPacket)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).OnuPacketOut(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/OnuPacketOut",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).OnuPacketOut(ctx, req.(*OnuPacket))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_UplinkPacketOut_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UplinkPacket)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).UplinkPacketOut(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/UplinkPacketOut",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).UplinkPacketOut(ctx, req.(*UplinkPacket))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_FlowAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Flow)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).FlowAdd(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/FlowAdd",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).FlowAdd(ctx, req.(*Flow))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_FlowRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Flow)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).FlowRemove(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/FlowRemove",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).FlowRemove(ctx, req.(*Flow))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_HeartbeatCheck_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).HeartbeatCheck(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/HeartbeatCheck",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).HeartbeatCheck(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_EnablePonIf_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Interface)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).EnablePonIf(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/EnablePonIf",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).EnablePonIf(ctx, req.(*Interface))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_DisablePonIf_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Interface)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).DisablePonIf(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/DisablePonIf",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).DisablePonIf(ctx, req.(*Interface))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_GetDeviceInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).GetDeviceInfo(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/GetDeviceInfo",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).GetDeviceInfo(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_Reboot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).Reboot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/Reboot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).Reboot(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_CollectStatistics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).CollectStatistics(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/CollectStatistics",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).CollectStatistics(ctx, req.(*Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_CreateTrafficSchedulers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(tech_profile.TrafficSchedulers)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).CreateTrafficSchedulers(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/CreateTrafficSchedulers",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).CreateTrafficSchedulers(ctx, req.(*tech_profile.TrafficSchedulers))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_RemoveTrafficSchedulers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(tech_profile.TrafficSchedulers)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).RemoveTrafficSchedulers(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/RemoveTrafficSchedulers",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).RemoveTrafficSchedulers(ctx, req.(*tech_profile.TrafficSchedulers))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_CreateTrafficQueues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(tech_profile.TrafficQueues)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).CreateTrafficQueues(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/CreateTrafficQueues",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).CreateTrafficQueues(ctx, req.(*tech_profile.TrafficQueues))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_RemoveTrafficQueues_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(tech_profile.TrafficQueues)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).RemoveTrafficQueues(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/RemoveTrafficQueues",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).RemoveTrafficQueues(ctx, req.(*tech_profile.TrafficQueues))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Openolt_EnableIndication_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(Empty)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(OpenoltServer).EnableIndication(m, &openoltEnableIndicationServer{stream})
+}
+
+type Openolt_EnableIndicationServer interface {
+	Send(*Indication) error
+	grpc.ServerStream
+}
+
+type openoltEnableIndicationServer struct {
+	grpc.ServerStream
+}
+
+func (x *openoltEnableIndicationServer) Send(m *Indication) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _Openolt_PerformGroupOperation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Group)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(OpenoltServer).PerformGroupOperation(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/openolt.Openolt/PerformGroupOperation",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(OpenoltServer).PerformGroupOperation(ctx, req.(*Group))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Openolt_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "openolt.Openolt",
+	HandlerType: (*OpenoltServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "DisableOlt",
+			Handler:    _Openolt_DisableOlt_Handler,
+		},
+		{
+			MethodName: "ReenableOlt",
+			Handler:    _Openolt_ReenableOlt_Handler,
+		},
+		{
+			MethodName: "ActivateOnu",
+			Handler:    _Openolt_ActivateOnu_Handler,
+		},
+		{
+			MethodName: "DeactivateOnu",
+			Handler:    _Openolt_DeactivateOnu_Handler,
+		},
+		{
+			MethodName: "DeleteOnu",
+			Handler:    _Openolt_DeleteOnu_Handler,
+		},
+		{
+			MethodName: "OmciMsgOut",
+			Handler:    _Openolt_OmciMsgOut_Handler,
+		},
+		{
+			MethodName: "OnuPacketOut",
+			Handler:    _Openolt_OnuPacketOut_Handler,
+		},
+		{
+			MethodName: "UplinkPacketOut",
+			Handler:    _Openolt_UplinkPacketOut_Handler,
+		},
+		{
+			MethodName: "FlowAdd",
+			Handler:    _Openolt_FlowAdd_Handler,
+		},
+		{
+			MethodName: "FlowRemove",
+			Handler:    _Openolt_FlowRemove_Handler,
+		},
+		{
+			MethodName: "HeartbeatCheck",
+			Handler:    _Openolt_HeartbeatCheck_Handler,
+		},
+		{
+			MethodName: "EnablePonIf",
+			Handler:    _Openolt_EnablePonIf_Handler,
+		},
+		{
+			MethodName: "DisablePonIf",
+			Handler:    _Openolt_DisablePonIf_Handler,
+		},
+		{
+			MethodName: "GetDeviceInfo",
+			Handler:    _Openolt_GetDeviceInfo_Handler,
+		},
+		{
+			MethodName: "Reboot",
+			Handler:    _Openolt_Reboot_Handler,
+		},
+		{
+			MethodName: "CollectStatistics",
+			Handler:    _Openolt_CollectStatistics_Handler,
+		},
+		{
+			MethodName: "CreateTrafficSchedulers",
+			Handler:    _Openolt_CreateTrafficSchedulers_Handler,
+		},
+		{
+			MethodName: "RemoveTrafficSchedulers",
+			Handler:    _Openolt_RemoveTrafficSchedulers_Handler,
+		},
+		{
+			MethodName: "CreateTrafficQueues",
+			Handler:    _Openolt_CreateTrafficQueues_Handler,
+		},
+		{
+			MethodName: "RemoveTrafficQueues",
+			Handler:    _Openolt_RemoveTrafficQueues_Handler,
+		},
+		{
+			MethodName: "PerformGroupOperation",
+			Handler:    _Openolt_PerformGroupOperation_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "EnableIndication",
+			Handler:       _Openolt_EnableIndication_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "voltha_protos/openolt.proto",
+}
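
The service descriptor above wires each Openolt RPC to its unary or streaming handler. As a minimal illustrative sketch (not part of this patch), a caller could drive one of those RPCs through the generated client stub; it assumes the NewOpenoltClient constructor generated earlier in this file, and the dial target, timeout, and ID values are placeholders:

package main

import (
	"context"
	"log"
	"time"

	oop "github.com/opencord/voltha-protos/v3/go/openolt"
	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder address; a real caller would use the OLT agent endpoint.
	conn, err := grpc.Dial("192.168.1.10:9191", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("failed to connect to openolt agent: %v", err)
	}
	defer conn.Close()

	// NewOpenoltClient is assumed to be generated earlier in openolt.pb.go.
	client := oop.NewOpenoltClient(conn)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// CreateTrafficSchedulers is one of the methods registered in the
	// service descriptor above; the message fields come from tech_profile.pb.go.
	if _, err := client.CreateTrafficSchedulers(ctx, &tp_pb.TrafficSchedulers{
		IntfId: 0,
		OnuId:  1,
		UniId:  0,
		PortNo: 16,
	}); err != nil {
		log.Fatalf("CreateTrafficSchedulers failed: %v", err)
	}
}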
diff --git a/vendor/github.com/opencord/voltha-protos/v3/go/tech_profile/tech_profile.pb.go b/vendor/github.com/opencord/voltha-protos/v3/go/tech_profile/tech_profile.pb.go
new file mode 100644
index 0000000..f74a616
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v3/go/tech_profile/tech_profile.pb.go
@@ -0,0 +1,972 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/tech_profile.proto
+
+package tech_profile
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Direction int32
+
+const (
+	Direction_UPSTREAM      Direction = 0
+	Direction_DOWNSTREAM    Direction = 1
+	Direction_BIDIRECTIONAL Direction = 2
+)
+
+var Direction_name = map[int32]string{
+	0: "UPSTREAM",
+	1: "DOWNSTREAM",
+	2: "BIDIRECTIONAL",
+}
+
+var Direction_value = map[string]int32{
+	"UPSTREAM":      0,
+	"DOWNSTREAM":    1,
+	"BIDIRECTIONAL": 2,
+}
+
+func (x Direction) String() string {
+	return proto.EnumName(Direction_name, int32(x))
+}
+
+func (Direction) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{0}
+}
+
+type SchedulingPolicy int32
+
+const (
+	SchedulingPolicy_WRR            SchedulingPolicy = 0
+	SchedulingPolicy_StrictPriority SchedulingPolicy = 1
+	SchedulingPolicy_Hybrid         SchedulingPolicy = 2
+)
+
+var SchedulingPolicy_name = map[int32]string{
+	0: "WRR",
+	1: "StrictPriority",
+	2: "Hybrid",
+}
+
+var SchedulingPolicy_value = map[string]int32{
+	"WRR":            0,
+	"StrictPriority": 1,
+	"Hybrid":         2,
+}
+
+func (x SchedulingPolicy) String() string {
+	return proto.EnumName(SchedulingPolicy_name, int32(x))
+}
+
+func (SchedulingPolicy) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{1}
+}
+
+type AdditionalBW int32
+
+const (
+	AdditionalBW_AdditionalBW_None       AdditionalBW = 0
+	AdditionalBW_AdditionalBW_NA         AdditionalBW = 1
+	AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
+	AdditionalBW_AdditionalBW_Auto       AdditionalBW = 3
+)
+
+var AdditionalBW_name = map[int32]string{
+	0: "AdditionalBW_None",
+	1: "AdditionalBW_NA",
+	2: "AdditionalBW_BestEffort",
+	3: "AdditionalBW_Auto",
+}
+
+var AdditionalBW_value = map[string]int32{
+	"AdditionalBW_None":       0,
+	"AdditionalBW_NA":         1,
+	"AdditionalBW_BestEffort": 2,
+	"AdditionalBW_Auto":       3,
+}
+
+func (x AdditionalBW) String() string {
+	return proto.EnumName(AdditionalBW_name, int32(x))
+}
+
+func (AdditionalBW) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{2}
+}
+
+type DiscardPolicy int32
+
+const (
+	DiscardPolicy_TailDrop  DiscardPolicy = 0
+	DiscardPolicy_WTailDrop DiscardPolicy = 1
+	DiscardPolicy_Red       DiscardPolicy = 2
+	DiscardPolicy_WRed      DiscardPolicy = 3
+)
+
+var DiscardPolicy_name = map[int32]string{
+	0: "TailDrop",
+	1: "WTailDrop",
+	2: "Red",
+	3: "WRed",
+}
+
+var DiscardPolicy_value = map[string]int32{
+	"TailDrop":  0,
+	"WTailDrop": 1,
+	"Red":       2,
+	"WRed":      3,
+}
+
+func (x DiscardPolicy) String() string {
+	return proto.EnumName(DiscardPolicy_name, int32(x))
+}
+
+func (DiscardPolicy) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{3}
+}
+
+type InferredAdditionBWIndication int32
+
+const (
+	InferredAdditionBWIndication_InferredAdditionBWIndication_None       InferredAdditionBWIndication = 0
+	InferredAdditionBWIndication_InferredAdditionBWIndication_Assured    InferredAdditionBWIndication = 1
+	InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort InferredAdditionBWIndication = 2
+)
+
+var InferredAdditionBWIndication_name = map[int32]string{
+	0: "InferredAdditionBWIndication_None",
+	1: "InferredAdditionBWIndication_Assured",
+	2: "InferredAdditionBWIndication_BestEffort",
+}
+
+var InferredAdditionBWIndication_value = map[string]int32{
+	"InferredAdditionBWIndication_None":       0,
+	"InferredAdditionBWIndication_Assured":    1,
+	"InferredAdditionBWIndication_BestEffort": 2,
+}
+
+func (x InferredAdditionBWIndication) String() string {
+	return proto.EnumName(InferredAdditionBWIndication_name, int32(x))
+}
+
+func (InferredAdditionBWIndication) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{4}
+}
+
+type SchedulerConfig struct {
+	Direction            Direction        `protobuf:"varint,1,opt,name=direction,proto3,enum=tech_profile.Direction" json:"direction,omitempty"`
+	AdditionalBw         AdditionalBW     `protobuf:"varint,2,opt,name=additional_bw,json=additionalBw,proto3,enum=tech_profile.AdditionalBW" json:"additional_bw,omitempty"`
+	Priority             uint32           `protobuf:"fixed32,3,opt,name=priority,proto3" json:"priority,omitempty"`
+	Weight               uint32           `protobuf:"fixed32,4,opt,name=weight,proto3" json:"weight,omitempty"`
+	SchedPolicy          SchedulingPolicy `protobuf:"varint,5,opt,name=sched_policy,json=schedPolicy,proto3,enum=tech_profile.SchedulingPolicy" json:"sched_policy,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *SchedulerConfig) Reset()         { *m = SchedulerConfig{} }
+func (m *SchedulerConfig) String() string { return proto.CompactTextString(m) }
+func (*SchedulerConfig) ProtoMessage()    {}
+func (*SchedulerConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{0}
+}
+
+func (m *SchedulerConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SchedulerConfig.Unmarshal(m, b)
+}
+func (m *SchedulerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SchedulerConfig.Marshal(b, m, deterministic)
+}
+func (m *SchedulerConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SchedulerConfig.Merge(m, src)
+}
+func (m *SchedulerConfig) XXX_Size() int {
+	return xxx_messageInfo_SchedulerConfig.Size(m)
+}
+func (m *SchedulerConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_SchedulerConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SchedulerConfig proto.InternalMessageInfo
+
+func (m *SchedulerConfig) GetDirection() Direction {
+	if m != nil {
+		return m.Direction
+	}
+	return Direction_UPSTREAM
+}
+
+func (m *SchedulerConfig) GetAdditionalBw() AdditionalBW {
+	if m != nil {
+		return m.AdditionalBw
+	}
+	return AdditionalBW_AdditionalBW_None
+}
+
+func (m *SchedulerConfig) GetPriority() uint32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *SchedulerConfig) GetWeight() uint32 {
+	if m != nil {
+		return m.Weight
+	}
+	return 0
+}
+
+func (m *SchedulerConfig) GetSchedPolicy() SchedulingPolicy {
+	if m != nil {
+		return m.SchedPolicy
+	}
+	return SchedulingPolicy_WRR
+}
+
+type TrafficShapingInfo struct {
+	Cir                  uint32                       `protobuf:"fixed32,1,opt,name=cir,proto3" json:"cir,omitempty"`
+	Cbs                  uint32                       `protobuf:"fixed32,2,opt,name=cbs,proto3" json:"cbs,omitempty"`
+	Pir                  uint32                       `protobuf:"fixed32,3,opt,name=pir,proto3" json:"pir,omitempty"`
+	Pbs                  uint32                       `protobuf:"fixed32,4,opt,name=pbs,proto3" json:"pbs,omitempty"`
+	Gir                  uint32                       `protobuf:"fixed32,5,opt,name=gir,proto3" json:"gir,omitempty"`
+	AddBwInd             InferredAdditionBWIndication `protobuf:"varint,6,opt,name=add_bw_ind,json=addBwInd,proto3,enum=tech_profile.InferredAdditionBWIndication" json:"add_bw_ind,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *TrafficShapingInfo) Reset()         { *m = TrafficShapingInfo{} }
+func (m *TrafficShapingInfo) String() string { return proto.CompactTextString(m) }
+func (*TrafficShapingInfo) ProtoMessage()    {}
+func (*TrafficShapingInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{1}
+}
+
+func (m *TrafficShapingInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TrafficShapingInfo.Unmarshal(m, b)
+}
+func (m *TrafficShapingInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TrafficShapingInfo.Marshal(b, m, deterministic)
+}
+func (m *TrafficShapingInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TrafficShapingInfo.Merge(m, src)
+}
+func (m *TrafficShapingInfo) XXX_Size() int {
+	return xxx_messageInfo_TrafficShapingInfo.Size(m)
+}
+func (m *TrafficShapingInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_TrafficShapingInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TrafficShapingInfo proto.InternalMessageInfo
+
+func (m *TrafficShapingInfo) GetCir() uint32 {
+	if m != nil {
+		return m.Cir
+	}
+	return 0
+}
+
+func (m *TrafficShapingInfo) GetCbs() uint32 {
+	if m != nil {
+		return m.Cbs
+	}
+	return 0
+}
+
+func (m *TrafficShapingInfo) GetPir() uint32 {
+	if m != nil {
+		return m.Pir
+	}
+	return 0
+}
+
+func (m *TrafficShapingInfo) GetPbs() uint32 {
+	if m != nil {
+		return m.Pbs
+	}
+	return 0
+}
+
+func (m *TrafficShapingInfo) GetGir() uint32 {
+	if m != nil {
+		return m.Gir
+	}
+	return 0
+}
+
+func (m *TrafficShapingInfo) GetAddBwInd() InferredAdditionBWIndication {
+	if m != nil {
+		return m.AddBwInd
+	}
+	return InferredAdditionBWIndication_InferredAdditionBWIndication_None
+}
+
+type TrafficScheduler struct {
+	Direction            Direction           `protobuf:"varint,1,opt,name=direction,proto3,enum=tech_profile.Direction" json:"direction,omitempty"`
+	AllocId              uint32              `protobuf:"fixed32,2,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
+	Scheduler            *SchedulerConfig    `protobuf:"bytes,3,opt,name=scheduler,proto3" json:"scheduler,omitempty"`
+	TrafficShapingInfo   *TrafficShapingInfo `protobuf:"bytes,4,opt,name=traffic_shaping_info,json=trafficShapingInfo,proto3" json:"traffic_shaping_info,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *TrafficScheduler) Reset()         { *m = TrafficScheduler{} }
+func (m *TrafficScheduler) String() string { return proto.CompactTextString(m) }
+func (*TrafficScheduler) ProtoMessage()    {}
+func (*TrafficScheduler) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{2}
+}
+
+func (m *TrafficScheduler) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TrafficScheduler.Unmarshal(m, b)
+}
+func (m *TrafficScheduler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TrafficScheduler.Marshal(b, m, deterministic)
+}
+func (m *TrafficScheduler) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TrafficScheduler.Merge(m, src)
+}
+func (m *TrafficScheduler) XXX_Size() int {
+	return xxx_messageInfo_TrafficScheduler.Size(m)
+}
+func (m *TrafficScheduler) XXX_DiscardUnknown() {
+	xxx_messageInfo_TrafficScheduler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TrafficScheduler proto.InternalMessageInfo
+
+func (m *TrafficScheduler) GetDirection() Direction {
+	if m != nil {
+		return m.Direction
+	}
+	return Direction_UPSTREAM
+}
+
+func (m *TrafficScheduler) GetAllocId() uint32 {
+	if m != nil {
+		return m.AllocId
+	}
+	return 0
+}
+
+func (m *TrafficScheduler) GetScheduler() *SchedulerConfig {
+	if m != nil {
+		return m.Scheduler
+	}
+	return nil
+}
+
+func (m *TrafficScheduler) GetTrafficShapingInfo() *TrafficShapingInfo {
+	if m != nil {
+		return m.TrafficShapingInfo
+	}
+	return nil
+}
+
+type TrafficSchedulers struct {
+	IntfId               uint32              `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32              `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	UniId                uint32              `protobuf:"fixed32,4,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
+	PortNo               uint32              `protobuf:"fixed32,5,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	TrafficScheds        []*TrafficScheduler `protobuf:"bytes,3,rep,name=traffic_scheds,json=trafficScheds,proto3" json:"traffic_scheds,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *TrafficSchedulers) Reset()         { *m = TrafficSchedulers{} }
+func (m *TrafficSchedulers) String() string { return proto.CompactTextString(m) }
+func (*TrafficSchedulers) ProtoMessage()    {}
+func (*TrafficSchedulers) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{3}
+}
+
+func (m *TrafficSchedulers) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TrafficSchedulers.Unmarshal(m, b)
+}
+func (m *TrafficSchedulers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TrafficSchedulers.Marshal(b, m, deterministic)
+}
+func (m *TrafficSchedulers) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TrafficSchedulers.Merge(m, src)
+}
+func (m *TrafficSchedulers) XXX_Size() int {
+	return xxx_messageInfo_TrafficSchedulers.Size(m)
+}
+func (m *TrafficSchedulers) XXX_DiscardUnknown() {
+	xxx_messageInfo_TrafficSchedulers.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TrafficSchedulers proto.InternalMessageInfo
+
+func (m *TrafficSchedulers) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *TrafficSchedulers) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *TrafficSchedulers) GetUniId() uint32 {
+	if m != nil {
+		return m.UniId
+	}
+	return 0
+}
+
+func (m *TrafficSchedulers) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *TrafficSchedulers) GetTrafficScheds() []*TrafficScheduler {
+	if m != nil {
+		return m.TrafficScheds
+	}
+	return nil
+}
+
+type TailDropDiscardConfig struct {
+	QueueSize            uint32   `protobuf:"fixed32,1,opt,name=queue_size,json=queueSize,proto3" json:"queue_size,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *TailDropDiscardConfig) Reset()         { *m = TailDropDiscardConfig{} }
+func (m *TailDropDiscardConfig) String() string { return proto.CompactTextString(m) }
+func (*TailDropDiscardConfig) ProtoMessage()    {}
+func (*TailDropDiscardConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{4}
+}
+
+func (m *TailDropDiscardConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TailDropDiscardConfig.Unmarshal(m, b)
+}
+func (m *TailDropDiscardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TailDropDiscardConfig.Marshal(b, m, deterministic)
+}
+func (m *TailDropDiscardConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TailDropDiscardConfig.Merge(m, src)
+}
+func (m *TailDropDiscardConfig) XXX_Size() int {
+	return xxx_messageInfo_TailDropDiscardConfig.Size(m)
+}
+func (m *TailDropDiscardConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_TailDropDiscardConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TailDropDiscardConfig proto.InternalMessageInfo
+
+func (m *TailDropDiscardConfig) GetQueueSize() uint32 {
+	if m != nil {
+		return m.QueueSize
+	}
+	return 0
+}
+
+type RedDiscardConfig struct {
+	MinThreshold         uint32   `protobuf:"fixed32,1,opt,name=min_threshold,json=minThreshold,proto3" json:"min_threshold,omitempty"`
+	MaxThreshold         uint32   `protobuf:"fixed32,2,opt,name=max_threshold,json=maxThreshold,proto3" json:"max_threshold,omitempty"`
+	MaxProbability       uint32   `protobuf:"fixed32,3,opt,name=max_probability,json=maxProbability,proto3" json:"max_probability,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RedDiscardConfig) Reset()         { *m = RedDiscardConfig{} }
+func (m *RedDiscardConfig) String() string { return proto.CompactTextString(m) }
+func (*RedDiscardConfig) ProtoMessage()    {}
+func (*RedDiscardConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{5}
+}
+
+func (m *RedDiscardConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_RedDiscardConfig.Unmarshal(m, b)
+}
+func (m *RedDiscardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_RedDiscardConfig.Marshal(b, m, deterministic)
+}
+func (m *RedDiscardConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RedDiscardConfig.Merge(m, src)
+}
+func (m *RedDiscardConfig) XXX_Size() int {
+	return xxx_messageInfo_RedDiscardConfig.Size(m)
+}
+func (m *RedDiscardConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_RedDiscardConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RedDiscardConfig proto.InternalMessageInfo
+
+func (m *RedDiscardConfig) GetMinThreshold() uint32 {
+	if m != nil {
+		return m.MinThreshold
+	}
+	return 0
+}
+
+func (m *RedDiscardConfig) GetMaxThreshold() uint32 {
+	if m != nil {
+		return m.MaxThreshold
+	}
+	return 0
+}
+
+func (m *RedDiscardConfig) GetMaxProbability() uint32 {
+	if m != nil {
+		return m.MaxProbability
+	}
+	return 0
+}
+
+type WRedDiscardConfig struct {
+	Green                *RedDiscardConfig `protobuf:"bytes,1,opt,name=green,proto3" json:"green,omitempty"`
+	Yellow               *RedDiscardConfig `protobuf:"bytes,2,opt,name=yellow,proto3" json:"yellow,omitempty"`
+	Red                  *RedDiscardConfig `protobuf:"bytes,3,opt,name=red,proto3" json:"red,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *WRedDiscardConfig) Reset()         { *m = WRedDiscardConfig{} }
+func (m *WRedDiscardConfig) String() string { return proto.CompactTextString(m) }
+func (*WRedDiscardConfig) ProtoMessage()    {}
+func (*WRedDiscardConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{6}
+}
+
+func (m *WRedDiscardConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_WRedDiscardConfig.Unmarshal(m, b)
+}
+func (m *WRedDiscardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_WRedDiscardConfig.Marshal(b, m, deterministic)
+}
+func (m *WRedDiscardConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WRedDiscardConfig.Merge(m, src)
+}
+func (m *WRedDiscardConfig) XXX_Size() int {
+	return xxx_messageInfo_WRedDiscardConfig.Size(m)
+}
+func (m *WRedDiscardConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_WRedDiscardConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WRedDiscardConfig proto.InternalMessageInfo
+
+func (m *WRedDiscardConfig) GetGreen() *RedDiscardConfig {
+	if m != nil {
+		return m.Green
+	}
+	return nil
+}
+
+func (m *WRedDiscardConfig) GetYellow() *RedDiscardConfig {
+	if m != nil {
+		return m.Yellow
+	}
+	return nil
+}
+
+func (m *WRedDiscardConfig) GetRed() *RedDiscardConfig {
+	if m != nil {
+		return m.Red
+	}
+	return nil
+}
+
+type DiscardConfig struct {
+	DiscardPolicy DiscardPolicy `protobuf:"varint,1,opt,name=discard_policy,json=discardPolicy,proto3,enum=tech_profile.DiscardPolicy" json:"discard_policy,omitempty"`
+	// Types that are valid to be assigned to DiscardConfig:
+	//	*DiscardConfig_TailDropDiscardConfig
+	//	*DiscardConfig_RedDiscardConfig
+	//	*DiscardConfig_WredDiscardConfig
+	DiscardConfig        isDiscardConfig_DiscardConfig `protobuf_oneof:"discard_config"`
+	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
+	XXX_unrecognized     []byte                        `json:"-"`
+	XXX_sizecache        int32                         `json:"-"`
+}
+
+func (m *DiscardConfig) Reset()         { *m = DiscardConfig{} }
+func (m *DiscardConfig) String() string { return proto.CompactTextString(m) }
+func (*DiscardConfig) ProtoMessage()    {}
+func (*DiscardConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{7}
+}
+
+func (m *DiscardConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DiscardConfig.Unmarshal(m, b)
+}
+func (m *DiscardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DiscardConfig.Marshal(b, m, deterministic)
+}
+func (m *DiscardConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DiscardConfig.Merge(m, src)
+}
+func (m *DiscardConfig) XXX_Size() int {
+	return xxx_messageInfo_DiscardConfig.Size(m)
+}
+func (m *DiscardConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_DiscardConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DiscardConfig proto.InternalMessageInfo
+
+func (m *DiscardConfig) GetDiscardPolicy() DiscardPolicy {
+	if m != nil {
+		return m.DiscardPolicy
+	}
+	return DiscardPolicy_TailDrop
+}
+
+type isDiscardConfig_DiscardConfig interface {
+	isDiscardConfig_DiscardConfig()
+}
+
+type DiscardConfig_TailDropDiscardConfig struct {
+	TailDropDiscardConfig *TailDropDiscardConfig `protobuf:"bytes,2,opt,name=tail_drop_discard_config,json=tailDropDiscardConfig,proto3,oneof"`
+}
+
+type DiscardConfig_RedDiscardConfig struct {
+	RedDiscardConfig *RedDiscardConfig `protobuf:"bytes,3,opt,name=red_discard_config,json=redDiscardConfig,proto3,oneof"`
+}
+
+type DiscardConfig_WredDiscardConfig struct {
+	WredDiscardConfig *WRedDiscardConfig `protobuf:"bytes,4,opt,name=wred_discard_config,json=wredDiscardConfig,proto3,oneof"`
+}
+
+func (*DiscardConfig_TailDropDiscardConfig) isDiscardConfig_DiscardConfig() {}
+
+func (*DiscardConfig_RedDiscardConfig) isDiscardConfig_DiscardConfig() {}
+
+func (*DiscardConfig_WredDiscardConfig) isDiscardConfig_DiscardConfig() {}
+
+func (m *DiscardConfig) GetDiscardConfig() isDiscardConfig_DiscardConfig {
+	if m != nil {
+		return m.DiscardConfig
+	}
+	return nil
+}
+
+func (m *DiscardConfig) GetTailDropDiscardConfig() *TailDropDiscardConfig {
+	if x, ok := m.GetDiscardConfig().(*DiscardConfig_TailDropDiscardConfig); ok {
+		return x.TailDropDiscardConfig
+	}
+	return nil
+}
+
+func (m *DiscardConfig) GetRedDiscardConfig() *RedDiscardConfig {
+	if x, ok := m.GetDiscardConfig().(*DiscardConfig_RedDiscardConfig); ok {
+		return x.RedDiscardConfig
+	}
+	return nil
+}
+
+func (m *DiscardConfig) GetWredDiscardConfig() *WRedDiscardConfig {
+	if x, ok := m.GetDiscardConfig().(*DiscardConfig_WredDiscardConfig); ok {
+		return x.WredDiscardConfig
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*DiscardConfig) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*DiscardConfig_TailDropDiscardConfig)(nil),
+		(*DiscardConfig_RedDiscardConfig)(nil),
+		(*DiscardConfig_WredDiscardConfig)(nil),
+	}
+}
+
+type TrafficQueue struct {
+	Direction            Direction        `protobuf:"varint,1,opt,name=direction,proto3,enum=tech_profile.Direction" json:"direction,omitempty"`
+	GemportId            uint32           `protobuf:"fixed32,2,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
+	PbitMap              string           `protobuf:"bytes,3,opt,name=pbit_map,json=pbitMap,proto3" json:"pbit_map,omitempty"`
+	AesEncryption        bool             `protobuf:"varint,4,opt,name=aes_encryption,json=aesEncryption,proto3" json:"aes_encryption,omitempty"`
+	SchedPolicy          SchedulingPolicy `protobuf:"varint,5,opt,name=sched_policy,json=schedPolicy,proto3,enum=tech_profile.SchedulingPolicy" json:"sched_policy,omitempty"`
+	Priority             uint32           `protobuf:"fixed32,6,opt,name=priority,proto3" json:"priority,omitempty"`
+	Weight               uint32           `protobuf:"fixed32,7,opt,name=weight,proto3" json:"weight,omitempty"`
+	DiscardPolicy        DiscardPolicy    `protobuf:"varint,8,opt,name=discard_policy,json=discardPolicy,proto3,enum=tech_profile.DiscardPolicy" json:"discard_policy,omitempty"`
+	DiscardConfig        *DiscardConfig   `protobuf:"bytes,9,opt,name=discard_config,json=discardConfig,proto3" json:"discard_config,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *TrafficQueue) Reset()         { *m = TrafficQueue{} }
+func (m *TrafficQueue) String() string { return proto.CompactTextString(m) }
+func (*TrafficQueue) ProtoMessage()    {}
+func (*TrafficQueue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{8}
+}
+
+func (m *TrafficQueue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TrafficQueue.Unmarshal(m, b)
+}
+func (m *TrafficQueue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TrafficQueue.Marshal(b, m, deterministic)
+}
+func (m *TrafficQueue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TrafficQueue.Merge(m, src)
+}
+func (m *TrafficQueue) XXX_Size() int {
+	return xxx_messageInfo_TrafficQueue.Size(m)
+}
+func (m *TrafficQueue) XXX_DiscardUnknown() {
+	xxx_messageInfo_TrafficQueue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TrafficQueue proto.InternalMessageInfo
+
+func (m *TrafficQueue) GetDirection() Direction {
+	if m != nil {
+		return m.Direction
+	}
+	return Direction_UPSTREAM
+}
+
+func (m *TrafficQueue) GetGemportId() uint32 {
+	if m != nil {
+		return m.GemportId
+	}
+	return 0
+}
+
+func (m *TrafficQueue) GetPbitMap() string {
+	if m != nil {
+		return m.PbitMap
+	}
+	return ""
+}
+
+func (m *TrafficQueue) GetAesEncryption() bool {
+	if m != nil {
+		return m.AesEncryption
+	}
+	return false
+}
+
+func (m *TrafficQueue) GetSchedPolicy() SchedulingPolicy {
+	if m != nil {
+		return m.SchedPolicy
+	}
+	return SchedulingPolicy_WRR
+}
+
+func (m *TrafficQueue) GetPriority() uint32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *TrafficQueue) GetWeight() uint32 {
+	if m != nil {
+		return m.Weight
+	}
+	return 0
+}
+
+func (m *TrafficQueue) GetDiscardPolicy() DiscardPolicy {
+	if m != nil {
+		return m.DiscardPolicy
+	}
+	return DiscardPolicy_TailDrop
+}
+
+func (m *TrafficQueue) GetDiscardConfig() *DiscardConfig {
+	if m != nil {
+		return m.DiscardConfig
+	}
+	return nil
+}
+
+type TrafficQueues struct {
+	IntfId               uint32          `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32          `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	UniId                uint32          `protobuf:"fixed32,4,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
+	PortNo               uint32          `protobuf:"fixed32,5,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	TrafficQueues        []*TrafficQueue `protobuf:"bytes,6,rep,name=traffic_queues,json=trafficQueues,proto3" json:"traffic_queues,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *TrafficQueues) Reset()         { *m = TrafficQueues{} }
+func (m *TrafficQueues) String() string { return proto.CompactTextString(m) }
+func (*TrafficQueues) ProtoMessage()    {}
+func (*TrafficQueues) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{9}
+}
+
+func (m *TrafficQueues) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TrafficQueues.Unmarshal(m, b)
+}
+func (m *TrafficQueues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TrafficQueues.Marshal(b, m, deterministic)
+}
+func (m *TrafficQueues) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TrafficQueues.Merge(m, src)
+}
+func (m *TrafficQueues) XXX_Size() int {
+	return xxx_messageInfo_TrafficQueues.Size(m)
+}
+func (m *TrafficQueues) XXX_DiscardUnknown() {
+	xxx_messageInfo_TrafficQueues.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TrafficQueues proto.InternalMessageInfo
+
+func (m *TrafficQueues) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *TrafficQueues) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *TrafficQueues) GetUniId() uint32 {
+	if m != nil {
+		return m.UniId
+	}
+	return 0
+}
+
+func (m *TrafficQueues) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *TrafficQueues) GetTrafficQueues() []*TrafficQueue {
+	if m != nil {
+		return m.TrafficQueues
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterEnum("tech_profile.Direction", Direction_name, Direction_value)
+	proto.RegisterEnum("tech_profile.SchedulingPolicy", SchedulingPolicy_name, SchedulingPolicy_value)
+	proto.RegisterEnum("tech_profile.AdditionalBW", AdditionalBW_name, AdditionalBW_value)
+	proto.RegisterEnum("tech_profile.DiscardPolicy", DiscardPolicy_name, DiscardPolicy_value)
+	proto.RegisterEnum("tech_profile.InferredAdditionBWIndication", InferredAdditionBWIndication_name, InferredAdditionBWIndication_value)
+	proto.RegisterType((*SchedulerConfig)(nil), "tech_profile.SchedulerConfig")
+	proto.RegisterType((*TrafficShapingInfo)(nil), "tech_profile.TrafficShapingInfo")
+	proto.RegisterType((*TrafficScheduler)(nil), "tech_profile.TrafficScheduler")
+	proto.RegisterType((*TrafficSchedulers)(nil), "tech_profile.TrafficSchedulers")
+	proto.RegisterType((*TailDropDiscardConfig)(nil), "tech_profile.TailDropDiscardConfig")
+	proto.RegisterType((*RedDiscardConfig)(nil), "tech_profile.RedDiscardConfig")
+	proto.RegisterType((*WRedDiscardConfig)(nil), "tech_profile.WRedDiscardConfig")
+	proto.RegisterType((*DiscardConfig)(nil), "tech_profile.DiscardConfig")
+	proto.RegisterType((*TrafficQueue)(nil), "tech_profile.TrafficQueue")
+	proto.RegisterType((*TrafficQueues)(nil), "tech_profile.TrafficQueues")
+}
+
+func init() { proto.RegisterFile("voltha_protos/tech_profile.proto", fileDescriptor_d019a68bffe14cae) }
+
+var fileDescriptor_d019a68bffe14cae = []byte{
+	// 1118 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0x1b, 0x45,
+	0x14, 0xf6, 0xda, 0x8d, 0x7f, 0x4e, 0x6c, 0x77, 0x33, 0x25, 0xd4, 0xa4, 0x2d, 0x18, 0x97, 0xaa,
+	0x91, 0x11, 0x31, 0x4a, 0x4b, 0x6f, 0x8a, 0x54, 0xd9, 0x4d, 0xa4, 0x58, 0xa2, 0x69, 0xba, 0x09,
+	0x32, 0xe2, 0x82, 0xd5, 0x78, 0x67, 0xbc, 0x1e, 0x69, 0x3d, 0xb3, 0xcc, 0x8e, 0xeb, 0xa4, 0x57,
+	0xdc, 0xf0, 0x16, 0xdc, 0xf2, 0x02, 0x70, 0x83, 0x78, 0x22, 0x5e, 0x80, 0x7b, 0x34, 0xb3, 0xbb,
+	0xb6, 0xd7, 0x36, 0x29, 0x54, 0x70, 0x37, 0xe7, 0xdb, 0x6f, 0xce, 0x9c, 0x6f, 0xce, 0xcf, 0x0e,
+	0x34, 0x5f, 0x8b, 0x40, 0x8d, 0xb1, 0x1b, 0x4a, 0xa1, 0x44, 0xd4, 0x51, 0xd4, 0x1b, 0xeb, 0xf5,
+	0x88, 0x05, 0xf4, 0xc0, 0x60, 0xa8, 0xba, 0x8c, 0xed, 0xdd, 0xf5, 0x85, 0xf0, 0x03, 0xda, 0xc1,
+	0x21, 0xeb, 0x60, 0xce, 0x85, 0xc2, 0x8a, 0x09, 0x1e, 0xc5, 0xdc, 0xd6, 0x0f, 0x79, 0xb8, 0x79,
+	0xee, 0x8d, 0x29, 0x99, 0x06, 0x54, 0x3e, 0x17, 0x7c, 0xc4, 0x7c, 0xf4, 0x05, 0x54, 0x08, 0x93,
+	0xd4, 0xd3, 0xbc, 0x86, 0xd5, 0xb4, 0xf6, 0xeb, 0x87, 0xb7, 0x0f, 0x32, 0xe7, 0x1c, 0xa5, 0x9f,
+	0x9d, 0x05, 0x13, 0x3d, 0x83, 0x1a, 0x26, 0x84, 0xe9, 0x35, 0x0e, 0xdc, 0xe1, 0xac, 0x91, 0x37,
+	0x5b, 0xf7, 0xb2, 0x5b, 0xbb, 0x73, 0x4a, 0x6f, 0xe0, 0x54, 0x17, 0x1b, 0x7a, 0x33, 0xb4, 0x07,
+	0xe5, 0x50, 0x32, 0x21, 0x99, 0xba, 0x6a, 0x14, 0x9a, 0xd6, 0x7e, 0xc9, 0x99, 0xdb, 0xe8, 0x7d,
+	0x28, 0xce, 0x28, 0xf3, 0xc7, 0xaa, 0x71, 0xc3, 0x7c, 0x49, 0x2c, 0xd4, 0x85, 0x6a, 0xa4, 0xc3,
+	0x77, 0x43, 0x11, 0x30, 0xef, 0xaa, 0xb1, 0x65, 0xce, 0xfc, 0x30, 0x7b, 0x66, 0x22, 0x90, 0x71,
+	0xff, 0xcc, 0xb0, 0x9c, 0x6d, 0xb3, 0x27, 0x36, 0x5a, 0xbf, 0x59, 0x80, 0x2e, 0x24, 0x1e, 0x8d,
+	0x98, 0x77, 0x3e, 0xc6, 0x21, 0xe3, 0x7e, 0x9f, 0x8f, 0x04, 0xb2, 0xa1, 0xe0, 0x31, 0x69, 0xf4,
+	0x97, 0x1c, 0xbd, 0x34, 0xc8, 0x30, 0x32, 0xb2, 0x34, 0x32, 0x8c, 0x34, 0x12, 0x32, 0x99, 0x04,
+	0xab, 0x97, 0x06, 0x19, 0x46, 0x49, 0x90, 0x7a, 0xa9, 0x11, 0x9f, 0x49, 0x13, 0x58, 0xc9, 0xd1,
+	0x4b, 0x74, 0x02, 0x80, 0x09, 0x71, 0x87, 0x33, 0x97, 0x71, 0xd2, 0x28, 0x9a, 0x88, 0xdb, 0xd9,
+	0x88, 0xfb, 0x7c, 0x44, 0xa5, 0xa4, 0x24, 0xbd, 0xad, 0xde, 0xa0, 0xcf, 0x09, 0xf3, 0x4c, 0xea,
+	0x9c, 0x32, 0x26, 0xa4, 0x37, 0xeb, 0x73, 0xd2, 0xfa, 0xd3, 0x02, 0x3b, 0x0d, 0x3d, 0x4d, 0xe2,
+	0xbb, 0xa6, 0xef, 0x03, 0x28, 0xe3, 0x20, 0x10, 0x9e, 0xcb, 0x48, 0x22, 0xb1, 0x64, 0xec, 0x3e,
+	0x41, 0x4f, 0xa1, 0x12, 0xa5, 0xee, 0x8d, 0xd8, 0xed, 0xc3, 0x7b, 0x1b, 0x6f, 0x38, 0x2d, 0x21,
+	0x67, 0xc1, 0x47, 0x0e, 0xbc, 0xa7, 0xe2, 0x10, 0xdd, 0x28, 0xbe, 0x5e, 0x97, 0xf1, 0x91, 0x30,
+	0x57, 0xb4, 0x7d, 0xd8, 0xcc, 0xfa, 0x59, 0xcf, 0x83, 0x83, 0xd4, 0x1a, 0xd6, 0xfa, 0xdd, 0x82,
+	0x9d, 0x55, 0xdd, 0x11, 0xba, 0x0d, 0x25, 0xc6, 0xd5, 0x48, 0x0b, 0x88, 0xb3, 0x56, 0xd4, 0x66,
+	0x9f, 0xa0, 0x5d, 0x28, 0x0a, 0x3e, 0x5d, 0x08, 0xdb, 0x12, 0x7c, 0x1a, 0xc3, 0x53, 0xce, 0x34,
+	0x1c, 0xa7, 0x6b, 0x6b, 0xca, 0x59, 0x9f, 0x68, 0x37, 0xa1, 0x90, 0xca, 0xe5, 0x22, 0x49, 0x5a,
+	0x51, 0x9b, 0xa7, 0x02, 0x1d, 0x43, 0x7d, 0xae, 0x44, 0x9f, 0x1a, 0x35, 0x0a, 0xcd, 0xc2, 0xfe,
+	0xf6, 0x6a, 0xb5, 0xad, 0x06, 0xe6, 0xd4, 0xd4, 0x12, 0x12, 0xb5, 0x9e, 0xc0, 0xee, 0x05, 0x66,
+	0xc1, 0x91, 0x14, 0xe1, 0x11, 0x8b, 0x3c, 0x2c, 0x49, 0xd2, 0x77, 0xf7, 0x00, 0xbe, 0x9f, 0xd2,
+	0x29, 0x75, 0x23, 0xf6, 0x86, 0x26, 0x12, 0x2a, 0x06, 0x39, 0x67, 0x6f, 0x68, 0xeb, 0x47, 0x0b,
+	0x6c, 0x87, 0x92, 0xec, 0x9e, 0xfb, 0x50, 0x9b, 0x30, 0xee, 0xaa, 0xb1, 0xa4, 0xd1, 0x58, 0x04,
+	0xa9, 0xf2, 0xea, 0x84, 0xf1, 0x8b, 0x14, 0x33, 0x24, 0x7c, 0xb9, 0x44, 0xca, 0x27, 0x24, 0x7c,
+	0xb9, 0x20, 0x3d, 0x84, 0x9b, 0x9a, 0x14, 0x4a, 0x31, 0xc4, 0x43, 0x16, 0x2c, 0x9a, 0xb0, 0x3e,
+	0xc1, 0x97, 0x67, 0x0b, 0xb4, 0xf5, 0xab, 0x05, 0x3b, 0x83, 0xb5, 0x40, 0x1e, 0xc3, 0x96, 0x2f,
+	0x29, 0x8d, 0x2b, 0x6e, 0xed, 0x4e, 0x56, 0xe9, 0x4e, 0x4c, 0x46, 0x4f, 0xa0, 0x78, 0x45, 0x83,
+	0x40, 0xc4, 0xc3, 0xe2, 0xed, 0xdb, 0x12, 0x36, 0xfa, 0x1c, 0x0a, 0x92, 0x92, 0xa4, 0x16, 0xdf,
+	0xb6, 0x49, 0x53, 0x5b, 0x7f, 0xe4, 0xa1, 0x96, 0x8d, 0xb8, 0x07, 0x75, 0x12, 0x03, 0xe9, 0xf0,
+	0x88, 0x9b, 0xe5, 0xce, 0x6a, 0xb3, 0x18, 0x4e, 0x32, 0x39, 0x6a, 0x64, 0xd9, 0x44, 0xdf, 0x41,
+	0x43, 0x61, 0x16, 0xb8, 0x44, 0x8a, 0xd0, 0x4d, 0xbd, 0x79, 0xc6, 0x7f, 0xa2, 0xe8, 0xfe, 0x4a,
+	0x71, 0x6c, 0xca, 0xfc, 0x49, 0xce, 0xd9, 0x55, 0x1b, 0x4b, 0xe2, 0x14, 0x90, 0xa4, 0x64, 0xd5,
+	0xf3, 0x3f, 0x92, 0x7d, 0x92, 0x73, 0x6c, 0xb9, 0x9a, 0xa5, 0x57, 0x70, 0x6b, 0xb6, 0xc1, 0x61,
+	0xdc, 0x8b, 0x1f, 0x65, 0x1d, 0x0e, 0x36, 0x78, 0xdc, 0x99, 0xad, 0xba, 0xec, 0xd9, 0x8b, 0x6b,
+	0x8c, 0xbd, 0xb5, 0x7e, 0x2e, 0x40, 0x35, 0x69, 0x82, 0x57, 0xba, 0x7a, 0xdf, 0x75, 0x22, 0xdd,
+	0x03, 0xf0, 0xe9, 0xc4, 0xf4, 0xe2, 0xbc, 0x75, 0x2b, 0x09, 0xd2, 0x27, 0x7a, 0x60, 0x85, 0x43,
+	0xa6, 0xdc, 0x09, 0x0e, 0xcd, 0x8d, 0x54, 0x9c, 0x92, 0xb6, 0x5f, 0xe0, 0x10, 0x3d, 0x80, 0x3a,
+	0xa6, 0x91, 0x4b, 0xb9, 0x27, 0xaf, 0x42, 0x73, 0xaa, 0x56, 0x58, 0x76, 0x6a, 0x98, 0x46, 0xc7,
+	0x73, 0xf0, 0x3f, 0xf8, 0x79, 0x64, 0xfe, 0x59, 0xc5, 0xbf, 0xfd, 0x67, 0x95, 0x32, 0xff, 0xac,
+	0xf5, 0xc2, 0x2b, 0xff, 0xeb, 0xc2, 0xeb, 0xad, 0xde, 0x7a, 0xa3, 0x62, 0x72, 0xb8, 0xd9, 0x47,
+	0xd2, 0x08, 0xa9, 0x8f, 0xd8, 0x6c, 0xfd, 0x62, 0x41, 0x6d, 0x39, 0x4f, 0xff, 0xff, 0x04, 0xed,
+	0x2e, 0x26, 0xa8, 0x99, 0x6b, 0x51, 0xa3, 0x68, 0x26, 0xe8, 0xde, 0xc6, 0x09, 0x6a, 0x82, 0x9a,
+	0x4f, 0xcf, 0x38, 0xc4, 0xf6, 0x97, 0x50, 0x99, 0x17, 0x0b, 0xaa, 0x42, 0xf9, 0xeb, 0xb3, 0xf3,
+	0x0b, 0xe7, 0xb8, 0xfb, 0xc2, 0xce, 0xa1, 0x3a, 0xc0, 0xd1, 0xcb, 0xc1, 0x69, 0x62, 0x5b, 0x68,
+	0x07, 0x6a, 0xbd, 0xfe, 0x51, 0xdf, 0x39, 0x7e, 0x7e, 0xd1, 0x7f, 0x79, 0xda, 0xfd, 0xca, 0xce,
+	0xb7, 0x9f, 0x82, 0xbd, 0x9a, 0x4f, 0x54, 0x82, 0xc2, 0xc0, 0x71, 0xec, 0x1c, 0x42, 0x50, 0x3f,
+	0x57, 0x92, 0x79, 0xea, 0x2c, 0xc9, 0xa0, 0x6d, 0x21, 0x80, 0xe2, 0xc9, 0xd5, 0x50, 0x32, 0x62,
+	0xe7, 0xdb, 0x1c, 0xaa, 0xcb, 0xaf, 0x17, 0xb4, 0x0b, 0x3b, 0xcb, 0xb6, 0x7b, 0x2a, 0x38, 0xb5,
+	0x73, 0xe8, 0x16, 0xdc, 0xcc, 0xc2, 0x5d, 0xdb, 0x42, 0x77, 0xe0, 0x76, 0x06, 0xec, 0xd1, 0x48,
+	0x1d, 0x8f, 0x46, 0x42, 0x2a, 0x3b, 0xbf, 0xe6, 0xa8, 0x3b, 0x55, 0xc2, 0x2e, 0xb4, 0x9f, 0xcd,
+	0x27, 0x56, 0x12, 0x69, 0x15, 0xca, 0xe9, 0xfc, 0xb0, 0x73, 0xa8, 0x06, 0x95, 0xc1, 0xdc, 0xb4,
+	0xb4, 0x0c, 0x87, 0x12, 0x3b, 0x8f, 0xca, 0x70, 0x43, 0xb7, 0xae, 0x5d, 0x68, 0xff, 0x64, 0xc1,
+	0xdd, 0xeb, 0x5e, 0x12, 0xe8, 0x01, 0x7c, 0x7c, 0xdd, 0xf7, 0x54, 0xd1, 0x3e, 0x7c, 0x72, 0x2d,
+	0xad, 0x1b, 0x45, 0x53, 0x49, 0x89, 0x6d, 0xa1, 0x4f, 0xe1, 0xe1, 0xb5, 0xcc, 0x65, 0xd9, 0xbd,
+	0x6f, 0xa0, 0x29, 0xa4, 0x7f, 0x20, 0x42, 0xca, 0x3d, 0x21, 0xc9, 0x41, 0xfc, 0xb0, 0xcd, 0x94,
+	0xc2, 0xb7, 0x8f, 0x7d, 0xa6, 0xc6, 0xd3, 0xe1, 0x81, 0x27, 0x26, 0x9d, 0x94, 0xd8, 0x89, 0x89,
+	0x9f, 0x25, 0x2f, 0xe0, 0xd7, 0x8f, 0x3a, 0xbe, 0xc8, 0xbc, 0x83, 0x87, 0x45, 0xf3, 0xe9, 0xd1,
+	0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x31, 0x2e, 0x0c, 0xef, 0x2c, 0x0b, 0x00, 0x00,
+}
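
The enums and messages generated above (Direction, SchedulingPolicy, AdditionalBW, SchedulerConfig, TrafficShapingInfo, TrafficSchedulers) are the payloads a workflow populates before invoking CreateTrafficSchedulers on the OLT agent. A minimal sketch of assembling such a request, with placeholder IDs and shaping values that are not taken from this patch:

package main

import (
	"fmt"

	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
)

// buildSchedulers assembles a TrafficSchedulers message for one subscriber.
// The port number and shaping rates below are illustrative placeholders.
func buildSchedulers(intfID, onuID, uniID, allocID uint32) *tp_pb.TrafficSchedulers {
	return &tp_pb.TrafficSchedulers{
		IntfId: intfID,
		OnuId:  onuID,
		UniId:  uniID,
		PortNo: onuID,
		TrafficScheds: []*tp_pb.TrafficScheduler{{
			Direction: tp_pb.Direction_UPSTREAM,
			AllocId:   allocID,
			Scheduler: &tp_pb.SchedulerConfig{
				Direction:    tp_pb.Direction_UPSTREAM,
				AdditionalBw: tp_pb.AdditionalBW_AdditionalBW_BestEffort,
				SchedPolicy:  tp_pb.SchedulingPolicy_StrictPriority,
			},
			// Committed/peak rates in kbps, peak burst in bytes (placeholders).
			TrafficShapingInfo: &tp_pb.TrafficShapingInfo{Cir: 16000, Pir: 32000, Pbs: 1000},
		}},
	}
}

func main() {
	fmt.Println(buildSchedulers(0, 1, 0, 1024))
}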