VOL-4077: Improve storage usage on etcd
- the onu adapter now receives the tp instance on the
  inter-container kafka message and need not reach the etcd store
  to fetch it (see the sketch after this list).
- on reconcile, the onu adapter need not go to the kv store to
  fetch the tp instance, but requests the tp instance via a new
  API towards the openolt adapter, which in turn fetches it from
  its cache.
- re-org the code in onu-metrics-manager to restore pm-data
  on reconcile, to avoid panics from accessing uninitialized data
  if the ani-fsm were to try adding a gem port for monitoring
  before pm-data is initialized properly.
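
For reference, a minimal sketch (not part of this patch) of how the embedded
tp instance is taken from the inter-adapter message instead of etcd; the type
and field names are those introduced by voltha-protos v4.2.0 and used in
device_handler.go below, the helper name is illustrative only:

    package sketch

    import (
    	"fmt"

    	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
    	"github.com/opencord/voltha-protos/v4/go/tech_profile"
    )

    // tpInstanceFromDownloadMsg returns the TP instance carried on the kafka
    // message; only GPON/XGPON/XGS-PON instances are supported, anything else
    // (e.g. EPON) is rejected, as in the device handler.
    func tpInstanceFromDownloadMsg(msg *ic.InterAdapterTechProfileDownloadMessage) (*tech_profile.TechProfileInstance, error) {
    	switch tp := msg.TechTpInstance.(type) {
    	case *ic.InterAdapterTechProfileDownloadMessage_TpInstance:
    		return tp.TpInstance, nil
    	default:
    		return nil, fmt.Errorf("unsupported-tp-instance-type--tp-path-%v", msg.TpInstancePath)
    	}
    }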

Change-Id: I82a6de2772155f6e08390b671fe26d692dd02c99
diff --git a/.golangci.yml b/.golangci.yml
index 1eb9e9e..c645c98 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -90,6 +90,7 @@
     - "don't use underscores in Go names; method Get_onu_images"
     - "don't use underscores in Go names; method Activate_onu_image"
     - "don't use underscores in Go names; method Commit_onu_image"
+    - "don't use underscores in Go names; method Process_tech_profile_instance_request"
     - "Error return value of `dh.coreProxy.PortStateUpdate` is not checked"
     - "Error return value of `rxCallbackEntry.cbFunction` is not checked"
     - "Error return value of `oo.sendNextRequest` is not checked"
diff --git a/Makefile b/Makefile
index ef1dea4..40525f1 100755
--- a/Makefile
+++ b/Makefile
@@ -73,9 +73,9 @@
 ## Local Development Helpers
 local-lib-go: ## Copies a local version of the voltha-lib-go dependency into the vendor directory
 ifdef LOCAL_LIB_GO
-	rm -rf vendor/github.com/opencord/voltha-lib-go/v4/pkg
-	mkdir -p vendor/github.com/opencord/voltha-lib-go/v4/pkg
-	cp -r ${LOCAL_LIB_GO}/pkg/* vendor/github.com/opencord/voltha-lib-go/v4/pkg/
+	rm -rf vendor/github.com/opencord/voltha-lib-go/v5/pkg
+	mkdir -p vendor/github.com/opencord/voltha-lib-go/v5/pkg
+	cp -r ${LOCAL_LIB_GO}/pkg/* vendor/github.com/opencord/voltha-lib-go/v5/pkg/
 endif
 
 build: docker-build ## Alias for 'docker build'
diff --git a/VERSION b/VERSION
index 3c30732..c725c59 100755
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.3.2-dev212
+1.3.2-dev213
diff --git a/cmd/openonu-adapter/common.go b/cmd/openonu-adapter/common.go
index 9c871ef..1c0be5e 100644
--- a/cmd/openonu-adapter/common.go
+++ b/cmd/openonu-adapter/common.go
@@ -18,7 +18,7 @@
 package main
 
 import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/cmd/openonu-adapter/main.go b/cmd/openonu-adapter/main.go
index d5a396b..c42cef4 100644
--- a/cmd/openonu-adapter/main.go
+++ b/cmd/openonu-adapter/main.go
@@ -28,17 +28,17 @@
 	"syscall"
 	"time"
 
-	"github.com/opencord/voltha-lib-go/v4/pkg/adapters"
-	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
-	com "github.com/opencord/voltha-lib-go/v4/pkg/adapters/common"
-	conf "github.com/opencord/voltha-lib-go/v4/pkg/config"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v4/pkg/events"
-	"github.com/opencord/voltha-lib-go/v4/pkg/events/eventif"
-	"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	"github.com/opencord/voltha-lib-go/v4/pkg/probe"
-	"github.com/opencord/voltha-lib-go/v4/pkg/version"
+	"github.com/opencord/voltha-lib-go/v5/pkg/adapters"
+	"github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
+	com "github.com/opencord/voltha-lib-go/v5/pkg/adapters/common"
+	conf "github.com/opencord/voltha-lib-go/v5/pkg/config"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/events"
+	"github.com/opencord/voltha-lib-go/v5/pkg/events/eventif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/probe"
+	"github.com/opencord/voltha-lib-go/v5/pkg/version"
 	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	"github.com/opencord/voltha-protos/v4/go/voltha"
 
diff --git a/docker/Dockerfile.openonu b/docker/Dockerfile.openonu
index 1aad701..34664cc 100755
--- a/docker/Dockerfile.openonu
+++ b/docker/Dockerfile.openonu
@@ -40,13 +40,13 @@
 RUN \
 CGO_ENABLED=$CGO_PARAMETER go build $EXTRA_GO_BUILD_TAGS -mod=vendor -o /app/openonu \
 -ldflags \
-"-X github.com/opencord/voltha-lib-go/v4/pkg/version.version=$org_label_schema_version \
--X github.com/opencord/voltha-lib-go/v4/pkg/version.vcsRef=$org_label_schema_vcs_ref \
--X github.com/opencord/voltha-lib-go/v4/pkg/version.vcsDirty=$org_opencord_vcs_dirty \
--X github.com/opencord/voltha-lib-go/v4/pkg/version.goVersion=$(go version 2>&1 | sed -E  's/.*go([0-9]+\.[0-9]+\.[0-9]+).*/\1/g') \
--X github.com/opencord/voltha-lib-go/v4/pkg/version.os=$(go env GOHOSTOS) \
--X github.com/opencord/voltha-lib-go/v4/pkg/version.arch=$(go env GOHOSTARCH) \
--X github.com/opencord/voltha-lib-go/v4/pkg/version.buildTime=$org_label_schema_build_date" \
+"-X github.com/opencord/voltha-lib-go/v5/pkg/version.version=$org_label_schema_version \
+-X github.com/opencord/voltha-lib-go/v5/pkg/version.vcsRef=$org_label_schema_vcs_ref \
+-X github.com/opencord/voltha-lib-go/v5/pkg/version.vcsDirty=$org_opencord_vcs_dirty \
+-X github.com/opencord/voltha-lib-go/v5/pkg/version.goVersion=$(go version 2>&1 | sed -E  's/.*go([0-9]+\.[0-9]+\.[0-9]+).*/\1/g') \
+-X github.com/opencord/voltha-lib-go/v5/pkg/version.os=$(go env GOHOSTOS) \
+-X github.com/opencord/voltha-lib-go/v5/pkg/version.arch=$(go env GOHOSTARCH) \
+-X github.com/opencord/voltha-lib-go/v5/pkg/version.buildTime=$org_label_schema_build_date" \
 ./cmd/openonu-adapter/
 
 WORKDIR /app
diff --git a/go.mod b/go.mod
index d4ae50f..6438888 100644
--- a/go.mod
+++ b/go.mod
@@ -10,8 +10,8 @@
 	github.com/google/gopacket v1.1.17
 	github.com/looplab/fsm v0.2.0
 	github.com/opencord/omci-lib-go v1.1.0
-	github.com/opencord/voltha-lib-go/v4 v4.3.4
-	github.com/opencord/voltha-protos/v4 v4.1.10
+	github.com/opencord/voltha-lib-go/v5 v5.0.2
+	github.com/opencord/voltha-protos/v4 v4.2.0
 	github.com/stretchr/testify v1.6.1
 	google.golang.org/grpc v1.25.1 // indirect
 )
diff --git a/go.sum b/go.sum
index 22c460e..a794265 100644
--- a/go.sum
+++ b/go.sum
@@ -152,11 +152,10 @@
 github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/opencord/omci-lib-go v1.1.0 h1:ICc29xMslz9fNqtvnN9tzS0yxRlugzKx0fUGABlwHNM=
 github.com/opencord/omci-lib-go v1.1.0/go.mod h1:moNk4j00XaM3olsu4a8lRAqGmcZJoyIbxtSr+VERLq4=
-github.com/opencord/voltha-lib-go/v4 v4.3.4 h1:+v+d1EfvtUjtotnA7ONuHJpG3Dnwqf5HA1caQPHCCGs=
-github.com/opencord/voltha-lib-go/v4 v4.3.4/go.mod h1:x0a7TxyzxPFaiewkbFiuy0+ftX5w4zeCRlFyyGZ4hhw=
-github.com/opencord/voltha-protos/v4 v4.1.2/go.mod h1:W/OIFIyvFh/C0vchRUuarIsMylEhzCRM9pNxLvkPtKc=
-github.com/opencord/voltha-protos/v4 v4.1.10 h1:Hie4kKEIiCybgBAVQzaD9IkJpSMI7Ssz5kSYHDE6FHU=
-github.com/opencord/voltha-protos/v4 v4.1.10/go.mod h1:W/OIFIyvFh/C0vchRUuarIsMylEhzCRM9pNxLvkPtKc=
+github.com/opencord/voltha-lib-go/v5 v5.0.2 h1:nLs42QM75BhKt4eXLdHhQwRPLrI2V2BjWJJlzGMUixg=
+github.com/opencord/voltha-lib-go/v5 v5.0.2/go.mod h1:i1fwPMicFccG38L200+IQAlfHSbszWg//jF1pDQxTPQ=
+github.com/opencord/voltha-protos/v4 v4.2.0 h1:QJZqHPRKa1E1xh40F3UA4xSjBI+6EmW7OfIcJqPNc4A=
+github.com/opencord/voltha-protos/v4 v4.2.0/go.mod h1:wNzWqmTwe7+DbYbpmOX6eMlglREtMkNxIDv3lyI2bco=
 github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=
diff --git a/internal/pkg/onuadaptercore/adapter_download_manager.go b/internal/pkg/onuadaptercore/adapter_download_manager.go
index 0bf8857..6803aed 100644
--- a/internal/pkg/onuadaptercore/adapter_download_manager.go
+++ b/internal/pkg/onuadaptercore/adapter_download_manager.go
@@ -32,7 +32,7 @@
 
 	"github.com/opencord/voltha-protos/v4/go/voltha"
 
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 // ### downloadToAdapter related definitions  ####
diff --git a/internal/pkg/onuadaptercore/alarm_manager.go b/internal/pkg/onuadaptercore/alarm_manager.go
index 8fac689..0dbc5b8 100644
--- a/internal/pkg/onuadaptercore/alarm_manager.go
+++ b/internal/pkg/onuadaptercore/alarm_manager.go
@@ -28,8 +28,8 @@
 	"github.com/looplab/fsm"
 	"github.com/opencord/omci-lib-go"
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/events/eventif"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/events/eventif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"github.com/opencord/voltha-protos/v4/go/voltha"
 )
 
diff --git a/internal/pkg/onuadaptercore/common.go b/internal/pkg/onuadaptercore/common.go
index e011f04..0a39a7e 100644
--- a/internal/pkg/onuadaptercore/common.go
+++ b/internal/pkg/onuadaptercore/common.go
@@ -18,7 +18,7 @@
 package adaptercoreonu
 
 import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/internal/pkg/onuadaptercore/device_handler.go b/internal/pkg/onuadaptercore/device_handler.go
index 00cc09b..e5444d4 100644
--- a/internal/pkg/onuadaptercore/device_handler.go
+++ b/internal/pkg/onuadaptercore/device_handler.go
@@ -21,6 +21,7 @@
 	"context"
 	"errors"
 	"fmt"
+	"github.com/opencord/voltha-protos/v4/go/tech_profile"
 	"strconv"
 	"sync"
 	"time"
@@ -29,11 +30,11 @@
 	"github.com/golang/protobuf/ptypes"
 	"github.com/looplab/fsm"
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db"
-	"github.com/opencord/voltha-lib-go/v4/pkg/events/eventif"
-	flow "github.com/opencord/voltha-lib-go/v4/pkg/flows"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"github.com/opencord/voltha-lib-go/v5/pkg/events/eventif"
+	flow "github.com/opencord/voltha-lib-go/v5/pkg/flows"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	vc "github.com/opencord/voltha-protos/v4/go/common"
 	"github.com/opencord/voltha-protos/v4/go/extension"
 	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
@@ -138,6 +139,7 @@
 	drReconcileFailed                  = 13
 	drReconcileMaxTimeout              = 14
 	drReconcileCanceled                = 15
+	drTechProfileConfigDownloadFailed  = 16
 )
 
 var deviceReasonMap = map[uint8]string{
@@ -147,6 +149,7 @@
 	drDiscoveryMibsyncComplete:         "discovery-mibsync-complete",
 	drInitialMibDownloaded:             "initial-mib-downloaded",
 	drTechProfileConfigDownloadSuccess: "tech-profile-config-download-success",
+	drTechProfileConfigDownloadFailed:  "tech-profile-config-download-failed",
 	drOmciFlowsPushed:                  "omci-flows-pushed",
 	drOmciAdminLock:                    "omci-admin-lock",
 	drOnuReenabled:                     "onu-reenabled",
@@ -409,52 +412,59 @@
 			techProfMsg.UniId, dh.deviceID))
 	}
 	uniID := uint8(techProfMsg.UniId)
-	tpID, err := GetTpIDFromTpPath(techProfMsg.Path)
+	tpID, err := GetTpIDFromTpPath(techProfMsg.TpInstancePath)
 	if err != nil {
-		logger.Errorw(ctx, "error-parsing-tpid-from-tppath", log.Fields{"err": err, "tp-path": techProfMsg.Path})
+		logger.Errorw(ctx, "error-parsing-tpid-from-tppath", log.Fields{"err": err, "tp-path": techProfMsg.TpInstancePath})
 		return err
 	}
-	logger.Debugw(ctx, "unmarshal-techprof-msg-body", log.Fields{"uniID": uniID, "tp-path": techProfMsg.Path, "tpID": tpID})
+	logger.Debugw(ctx, "unmarshal-techprof-msg-body", log.Fields{"uniID": uniID, "tp-path": techProfMsg.TpInstancePath, "tpID": tpID})
 
-	if bTpModify := pDevEntry.updateOnuUniTpPath(ctx, uniID, uint8(tpID), techProfMsg.Path); bTpModify {
-		logger.Debugw(ctx, "onu-uni-tp-path-modified", log.Fields{"uniID": uniID, "tp-path": techProfMsg.Path, "tpID": tpID})
-		//	if there has been some change for some uni TechProfilePath
-		//in order to allow concurrent calls to other dh instances we do not wait for execution here
-		//but doing so we can not indicate problems to the caller (who does what with that then?)
-		//by now we just assume straightforward successful execution
-		//TODO!!! Generally: In this scheme it would be good to have some means to indicate
-		//  possible problems to the caller later autonomously
+	if bTpModify := pDevEntry.updateOnuUniTpPath(ctx, uniID, uint8(tpID), techProfMsg.TpInstancePath); bTpModify {
 
-		// deadline context to ensure completion of background routines waited for
-		//20200721: 10s proved to be less in 8*8 ONU test on local vbox machine with debug, might be further adapted
-		deadline := time.Now().Add(dh.pOpenOnuAc.maxTimeoutInterAdapterComm) //allowed run time to finish before execution
-		dctx, cancel := context.WithDeadline(context.Background(), deadline)
+		switch tpInst := techProfMsg.TechTpInstance.(type) {
+		case *ic.InterAdapterTechProfileDownloadMessage_TpInstance:
+			logger.Debugw(ctx, "onu-uni-tp-path-modified", log.Fields{"uniID": uniID, "tp-path": techProfMsg.TpInstancePath, "tpID": tpID})
+			//	if there has been some change for some uni TechProfilePath
+			//in order to allow concurrent calls to other dh instances we do not wait for execution here
+			//but doing so we can not indicate problems to the caller (who does what with that then?)
+			//by now we just assume straightforward successful execution
+			//TODO!!! Generally: In this scheme it would be good to have some means to indicate
+			//  possible problems to the caller later autonomously
 
-		dh.pOnuTP.resetTpProcessingErrorIndication(uniID, tpID)
+			// deadline context to ensure completion of background routines waited for
+			//20200721: 10s proved to be less in 8*8 ONU test on local vbox machine with debug, might be further adapted
+			deadline := time.Now().Add(dh.pOpenOnuAc.maxTimeoutInterAdapterComm) //allowed run time to finish before execution
+			dctx, cancel := context.WithDeadline(context.Background(), deadline)
 
-		var wg sync.WaitGroup
-		wg.Add(1) // for the 1 go routine to finish
-		// attention: deadline completion check and wg.Done is to be done in both routines
-		go dh.pOnuTP.configureUniTp(log.WithSpanFromContext(dctx, ctx), uniID, techProfMsg.Path, &wg)
-		dh.waitForCompletion(ctx, cancel, &wg, "TechProfDwld") //wait for background process to finish
-		if tpErr := dh.pOnuTP.getTpProcessingErrorIndication(uniID, tpID); tpErr != nil {
-			logger.Errorw(ctx, "error-processing-tp", log.Fields{"device-id": dh.deviceID, "err": tpErr, "tp-path": techProfMsg.Path})
-			return tpErr
+			dh.pOnuTP.resetTpProcessingErrorIndication(uniID, tpID)
+
+			var wg sync.WaitGroup
+			wg.Add(1) // for the 1 go routine to finish
+			// attention: deadline completion check and wg.Done is to be done in both routines
+			go dh.pOnuTP.configureUniTp(log.WithSpanFromContext(dctx, ctx), uniID, techProfMsg.TpInstancePath, *tpInst.TpInstance, &wg)
+			dh.waitForCompletion(ctx, cancel, &wg, "TechProfDwld") //wait for background process to finish
+			if tpErr := dh.pOnuTP.getTpProcessingErrorIndication(uniID, tpID); tpErr != nil {
+				logger.Errorw(ctx, "error-processing-tp", log.Fields{"device-id": dh.deviceID, "err": tpErr, "tp-path": techProfMsg.TpInstancePath})
+				return tpErr
+			}
+			deadline = time.Now().Add(dh.pOpenOnuAc.maxTimeoutInterAdapterComm) //allowed run time to finish before execution
+			dctx2, cancel2 := context.WithDeadline(context.Background(), deadline)
+			pDevEntry.resetKvProcessingErrorIndication()
+			wg.Add(1) // for the 1 go routine to finish
+			go pDevEntry.updateOnuKvStore(log.WithSpanFromContext(dctx2, ctx), &wg)
+			dh.waitForCompletion(ctx, cancel2, &wg, "TechProfDwld") //wait for background process to finish
+			if kvErr := pDevEntry.getKvProcessingErrorIndication(); kvErr != nil {
+				logger.Errorw(ctx, "error-updating-KV", log.Fields{"device-id": dh.deviceID, "err": kvErr, "tp-path": techProfMsg.TpInstancePath})
+				return kvErr
+			}
+			return nil
+		default:
+			logger.Errorw(ctx, "unsupported-tp-instance-type", log.Fields{"tp-path": techProfMsg.TpInstancePath})
+			return fmt.Errorf("unsupported-tp-instance-type--tp-id-%v", techProfMsg.TpInstancePath)
 		}
-		deadline = time.Now().Add(dh.pOpenOnuAc.maxTimeoutInterAdapterComm) //allowed run time to finish before execution
-		dctx2, cancel2 := context.WithDeadline(context.Background(), deadline)
-		pDevEntry.resetKvProcessingErrorIndication()
-		wg.Add(1) // for the 1 go routine to finish
-		go pDevEntry.updateOnuKvStore(log.WithSpanFromContext(dctx2, ctx), &wg)
-		dh.waitForCompletion(ctx, cancel2, &wg, "TechProfDwld") //wait for background process to finish
-		if kvErr := pDevEntry.getKvProcessingErrorIndication(); kvErr != nil {
-			logger.Errorw(ctx, "error-updating-KV", log.Fields{"device-id": dh.deviceID, "err": kvErr, "tp-path": techProfMsg.Path})
-			return kvErr
-		}
-		return nil
 	}
 	// no change, nothing really to do - return success
-	logger.Debugw(ctx, "onu-uni-tp-path-not-modified", log.Fields{"uniID": uniID, "tp-path": techProfMsg.Path, "tpID": tpID})
+	logger.Debugw(ctx, "onu-uni-tp-path-not-modified", log.Fields{"uniID": uniID, "tp-path": techProfMsg.TpInstancePath, "tpID": tpID})
 	return nil
 }
 
@@ -493,9 +503,9 @@
 			delGemPortMsg.UniId, dh.deviceID))
 	}
 	uniID := uint8(delGemPortMsg.UniId)
-	tpID, err := GetTpIDFromTpPath(delGemPortMsg.TpPath)
+	tpID, err := GetTpIDFromTpPath(delGemPortMsg.TpInstancePath)
 	if err != nil {
-		logger.Errorw(ctx, "error-extracting-tp-id-from-tp-path", log.Fields{"err": err, "tp-path": delGemPortMsg.TpPath})
+		logger.Errorw(ctx, "error-extracting-tp-id-from-tp-path", log.Fields{"err": err, "tp-path": delGemPortMsg.TpInstancePath})
 		return err
 	}
 
@@ -509,7 +519,7 @@
 
 	var wg sync.WaitGroup
 	wg.Add(1) // for the 1 go routine to finish
-	go dh.pOnuTP.deleteTpResource(log.WithSpanFromContext(dctx, ctx), uniID, tpID, delGemPortMsg.TpPath,
+	go dh.pOnuTP.deleteTpResource(log.WithSpanFromContext(dctx, ctx), uniID, tpID, delGemPortMsg.TpInstancePath,
 		cResourceGemPort, delGemPortMsg.GemPortId, &wg)
 	dh.waitForCompletion(ctx, cancel, &wg, "GemDelete") //wait for background process to finish
 
@@ -551,7 +561,7 @@
 			delTcontMsg.UniId, dh.deviceID))
 	}
 	uniID := uint8(delTcontMsg.UniId)
-	tpPath := delTcontMsg.TpPath
+	tpPath := delTcontMsg.TpInstancePath
 	tpID, err := GetTpIDFromTpPath(tpPath)
 	if err != nil {
 		logger.Errorw(ctx, "error-extracting-tp-id-from-tp-path", log.Fields{"err": err, "tp-path": tpPath})
@@ -569,7 +579,7 @@
 
 		var wg sync.WaitGroup
 		wg.Add(2) // for the 2 go routines to finish
-		go dh.pOnuTP.deleteTpResource(log.WithSpanFromContext(dctx, ctx), uniID, tpID, delTcontMsg.TpPath,
+		go dh.pOnuTP.deleteTpResource(log.WithSpanFromContext(dctx, ctx), uniID, tpID, delTcontMsg.TpInstancePath,
 			cResourceTcont, delTcontMsg.AllocId, &wg)
 		// Removal of the tcont/alloc id mapping represents the removal of the tech profile
 		go pDevEntry.updateOnuKvStore(log.WithSpanFromContext(dctx, ctx), &wg)
@@ -822,7 +832,7 @@
 		} else {
 			logger.Errorw(ctx, "reconciling - restoring OnuTp-data failed - abort", log.Fields{"err": err, "device-id": dh.deviceID})
 		}
-		dh.stopReconciling(ctx)
+		dh.stopReconciling(ctx, false)
 		return
 	}
 	var onuIndication oop.OnuIndication
@@ -842,7 +852,7 @@
 	if pDevEntry == nil {
 		logger.Errorw(ctx, "No valid OnuDevice - aborting", log.Fields{"device-id": dh.deviceID})
 		if !dh.isSkipOnuConfigReconciling() {
-			dh.stopReconciling(ctx)
+			dh.stopReconciling(ctx, false)
 		}
 		return
 	}
@@ -855,12 +865,14 @@
 		logger.Debugw(ctx, "reconciling - no uni-configs have been stored before adapter restart - terminate reconcilement",
 			log.Fields{"device-id": dh.deviceID})
 		if !dh.isSkipOnuConfigReconciling() {
-			dh.stopReconciling(ctx)
+			dh.stopReconciling(ctx, true)
 		}
 		return
 	}
-	techProfsFound := false
 	flowsFound := false
+	techProfsFound := false
+	techProfInstLoadFailed := false
+outerLoop:
 	for _, uniData := range pDevEntry.sOnuPersistentData.PersUniConfig {
 		//TODO: check for uni-port specific reconcilement in case of multi-uni-port-per-onu-support
 		if len(uniData.PersTpPathMap) == 0 {
@@ -868,8 +880,31 @@
 				log.Fields{"uni-id": uniData.PersUniID, "device-id": dh.deviceID})
 			continue
 		}
-		techProfsFound = true
+		techProfsFound = true // set to true if we found TP once for any UNI port
 		for tpID := range uniData.PersTpPathMap {
+			// Request the TpInstance again from the openolt adapter in case of reconcile
+			iaTechTpInst, err := dh.AdapterProxy.TechProfileInstanceRequest(ctx, uniData.PersTpPathMap[tpID],
+				dh.device.ParentPortNo, dh.device.ProxyAddress.OnuId, uint32(uniData.PersUniID),
+				dh.pOpenOnuAc.config.Topic, dh.ProxyAddressType,
+				dh.parentID, dh.ProxyAddressID)
+			if err != nil || iaTechTpInst == nil {
+				logger.Errorw(ctx, "error fetching tp instance",
+					log.Fields{"tp-id": tpID, "tpPath": uniData.PersTpPathMap[tpID], "uni-id": uniData.PersUniID, "device-id": dh.deviceID, "err": err})
+				techProfInstLoadFailed = true // stop loading tp instance as soon as we hit failure
+				break outerLoop
+			}
+			var tpInst tech_profile.TechProfileInstance
+			switch techTpInst := iaTechTpInst.TechTpInstance.(type) {
+			case *ic.InterAdapterTechProfileDownloadMessage_TpInstance: // supports only GPON, XGPON, XGS-PON
+				tpInst = *techTpInst.TpInstance
+				logger.Debugw(ctx, "received-tp-instance-successfully-after-reconcile", log.Fields{"tp-id": tpID, "tpPath": uniData.PersTpPathMap[tpID], "uni-id": uniData.PersUniID, "device-id": dh.deviceID})
+
+			default: // do not support epon or other tech
+				logger.Errorw(ctx, "unsupported-tech", log.Fields{"tp-id": tpID, "tpPath": uniData.PersTpPathMap[tpID], "uni-id": uniData.PersUniID, "device-id": dh.deviceID})
+				techProfInstLoadFailed = true // stop loading tp instance as soon as we hit failure
+				break outerLoop
+			}
+
 			// deadline context to ensure completion of background routines waited for
 			//20200721: 10s proved to be less in 8*8 ONU test on local vbox machine with debug, might be further adapted
 			deadline := time.Now().Add(dh.pOpenOnuAc.maxTimeoutInterAdapterComm) //allowed run time to finish before execution
@@ -878,10 +913,12 @@
 			dh.pOnuTP.resetTpProcessingErrorIndication(uniData.PersUniID, tpID)
 			var wg sync.WaitGroup
 			wg.Add(1) // for the 1 go routine to finish
-			go dh.pOnuTP.configureUniTp(log.WithSpanFromContext(dctx, ctx), uniData.PersUniID, uniData.PersTpPathMap[tpID], &wg)
+			go dh.pOnuTP.configureUniTp(log.WithSpanFromContext(dctx, ctx), uniData.PersUniID, uniData.PersTpPathMap[tpID], tpInst, &wg)
 			dh.waitForCompletion(ctx, cancel, &wg, "TechProfReconcile") //wait for background process to finish
 			if err := dh.pOnuTP.getTpProcessingErrorIndication(uniData.PersUniID, tpID); err != nil {
 				logger.Errorw(ctx, err.Error(), log.Fields{"device-id": dh.deviceID})
+				techProfInstLoadFailed = true // stop loading tp instance as soon as we hit failure
+				break outerLoop
 			}
 		}
 		if len(uniData.PersFlowParams) != 0 {
@@ -892,18 +929,22 @@
 		logger.Debugw(ctx, "reconciling - no TPs have been stored before adapter restart - terminate reconcilement",
 			log.Fields{"device-id": dh.deviceID})
 		if !dh.isSkipOnuConfigReconciling() {
-			dh.stopReconciling(ctx)
+			dh.stopReconciling(ctx, true)
 		}
 		return
 	}
-	if dh.isSkipOnuConfigReconciling() {
+	if techProfInstLoadFailed {
+		dh.setDeviceReason(drTechProfileConfigDownloadFailed)
+		dh.stopReconciling(ctx, false)
+		return
+	} else if dh.isSkipOnuConfigReconciling() {
 		dh.setDeviceReason(drTechProfileConfigDownloadSuccess)
 	}
 	if !flowsFound {
 		logger.Debugw(ctx, "reconciling - no flows have been stored before adapter restart - terminate reconcilement",
 			log.Fields{"device-id": dh.deviceID})
 		if !dh.isSkipOnuConfigReconciling() {
-			dh.stopReconciling(ctx)
+			dh.stopReconciling(ctx, true)
 		}
 	}
 }
@@ -915,7 +956,7 @@
 	if pDevEntry == nil {
 		logger.Errorw(ctx, "No valid OnuDevice - aborting", log.Fields{"device-id": dh.deviceID})
 		if !dh.isSkipOnuConfigReconciling() {
-			dh.stopReconciling(ctx)
+			dh.stopReconciling(ctx, false)
 		}
 		return
 	}
@@ -926,7 +967,7 @@
 		logger.Debugw(ctx, "reconciling - no uni-configs have been stored before adapter restart - terminate reconcilement",
 			log.Fields{"device-id": dh.deviceID})
 		if !dh.isSkipOnuConfigReconciling() {
-			dh.stopReconciling(ctx)
+			dh.stopReconciling(ctx, true)
 		}
 		return
 	}
@@ -951,7 +992,7 @@
 			logger.Errorw(ctx, "reconciling - onuUniPort data not found  - terminate reconcilement",
 				log.Fields{"uniNo": uniNo, "device-id": dh.deviceID})
 			if !dh.isSkipOnuConfigReconciling() {
-				dh.stopReconciling(ctx)
+				dh.stopReconciling(ctx, false)
 			}
 			return
 		}
@@ -997,7 +1038,7 @@
 		logger.Debugw(ctx, "reconciling - no flows have been stored before adapter restart - terminate reconcilement",
 			log.Fields{"device-id": dh.deviceID})
 		if !dh.isSkipOnuConfigReconciling() {
-			dh.stopReconciling(ctx)
+			dh.stopReconciling(ctx, true)
 		}
 		return
 	}
@@ -1008,7 +1049,7 @@
 
 func (dh *deviceHandler) reconcileEnd(ctx context.Context) {
 	logger.Debugw(ctx, "reconciling - completed!", log.Fields{"device-id": dh.deviceID})
-	dh.stopReconciling(ctx)
+	dh.stopReconciling(ctx, true)
 }
 
 func (dh *deviceHandler) deleteDevicePersistencyData(ctx context.Context) error {
@@ -1752,7 +1793,7 @@
 			pDevEntry.mutexPersOnuConfig.RUnlock()
 			logger.Debugw(ctx, "reconciling - uni-ports were not unlocked before adapter restart - resume with a normal start-up",
 				log.Fields{"device-id": dh.deviceID})
-			dh.stopReconciling(ctx)
+			dh.stopReconciling(ctx, true)
 		} else {
 			pDevEntry.mutexPersOnuConfig.RUnlock()
 		}
@@ -2715,6 +2756,7 @@
 
 	logger.Debugw(ctx, "SetKVStoreBackend", log.Fields{"IpTarget": dh.pOpenOnuAc.KVStoreAddress,
 		"BasePathKvStore": aBasePathKvStore, "device-id": dh.deviceID})
+	// kvbackend := db.NewBackend(ctx, dh.pOpenOnuAc.KVStoreType, dh.pOpenOnuAc.KVStoreAddress, dh.pOpenOnuAc.KVStoreTimeout, aBasePathKvStore)
 	kvbackend := &db.Backend{
 		Client:    dh.pOpenOnuAc.kvClient,
 		StoreType: dh.pOpenOnuAc.KVStoreType,
@@ -3614,10 +3656,10 @@
 	dh.mutexReconcilingFlag.Unlock()
 }
 
-func (dh *deviceHandler) stopReconciling(ctx context.Context) {
-	logger.Debugw(ctx, "stop reconciling", log.Fields{"device-id": dh.deviceID})
+func (dh *deviceHandler) stopReconciling(ctx context.Context, success bool) {
+	logger.Debugw(ctx, "stop reconciling", log.Fields{"device-id": dh.deviceID, "success": success})
 	if dh.isReconciling() {
-		dh.chReconcilingFinished <- true
+		dh.chReconcilingFinished <- success
 	} else {
 		logger.Infow(ctx, "reconciling is not running", log.Fields{"device-id": dh.deviceID})
 	}
diff --git a/internal/pkg/onuadaptercore/file_download_manager.go b/internal/pkg/onuadaptercore/file_download_manager.go
index b75c319..d283606 100644
--- a/internal/pkg/onuadaptercore/file_download_manager.go
+++ b/internal/pkg/onuadaptercore/file_download_manager.go
@@ -29,7 +29,7 @@
 	"sync"
 	"time"
 
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"github.com/opencord/voltha-protos/v4/go/voltha"
 )
 
diff --git a/internal/pkg/onuadaptercore/mib_download.go b/internal/pkg/onuadaptercore/mib_download.go
index 305a647..be63ead 100644
--- a/internal/pkg/onuadaptercore/mib_download.go
+++ b/internal/pkg/onuadaptercore/mib_download.go
@@ -26,7 +26,7 @@
 
 	"github.com/opencord/omci-lib-go"
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	//ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	//"github.com/opencord/voltha-protos/v4/go/openflow_13"
 	//"github.com/opencord/voltha-protos/v4/go/voltha"
diff --git a/internal/pkg/onuadaptercore/mib_sync.go b/internal/pkg/onuadaptercore/mib_sync.go
index 8104d51..567a3f4 100644
--- a/internal/pkg/onuadaptercore/mib_sync.go
+++ b/internal/pkg/onuadaptercore/mib_sync.go
@@ -31,11 +31,11 @@
 	//"sync"
 	"time"
 
-	//"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
+	//"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
 	"github.com/opencord/omci-lib-go"
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	//ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	//"github.com/opencord/voltha-protos/v4/go/openflow_13"
 	//"github.com/opencord/voltha-protos/v4/go/voltha"
@@ -330,7 +330,7 @@
 				if success {
 					logger.Debugw(ctx, "reconciling flows has been finished in time",
 						log.Fields{"device-id": oo.deviceID})
-					oo.baseDeviceHandler.stopReconciling(ctx)
+					oo.baseDeviceHandler.stopReconciling(ctx, true)
 					_ = oo.pMibUploadFsm.pFsm.Event(ulEvSuccess)
 
 				} else {
@@ -1093,7 +1093,7 @@
 		oo.baseDeviceHandler.chReconcilingFlowsFinished <- false
 	}
 	if oo.baseDeviceHandler.isReconciling() {
-		oo.baseDeviceHandler.chReconcilingFinished <- false
+		oo.baseDeviceHandler.stopReconciling(ctx, false)
 	}
 	//the MibSync FSM might be active all the ONU-active time,
 	// hence it must be stopped unconditionally
diff --git a/internal/pkg/onuadaptercore/omci_ani_config.go b/internal/pkg/onuadaptercore/omci_ani_config.go
index b6ca5e4..94cf938 100644
--- a/internal/pkg/onuadaptercore/omci_ani_config.go
+++ b/internal/pkg/onuadaptercore/omci_ani_config.go
@@ -30,7 +30,7 @@
 	"github.com/looplab/fsm"
 	"github.com/opencord/omci-lib-go"
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	//ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	//"github.com/opencord/voltha-protos/v4/go/openflow_13"
 	//"github.com/opencord/voltha-protos/v4/go/voltha"
@@ -1374,7 +1374,7 @@
 			_ = oFsm.pAdaptFsm.pFsm.Event(aniEvReset)
 			return
 		}
-		// Mark the gem port to be removed for Performance History monitoring
+		// Mark the gem port to be added for Performance History monitoring
 		if oFsm.pDeviceHandler.pOnuMetricsMgr != nil {
 			oFsm.pDeviceHandler.pOnuMetricsMgr.AddGemPortForPerfMonitoring(ctx, gemPortAttribs.gemPortID)
 		}
diff --git a/internal/pkg/onuadaptercore/omci_cc.go b/internal/pkg/onuadaptercore/omci_cc.go
index edeab97..713426c 100644
--- a/internal/pkg/onuadaptercore/omci_cc.go
+++ b/internal/pkg/onuadaptercore/omci_cc.go
@@ -33,11 +33,11 @@
 
 	"github.com/opencord/omci-lib-go"
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
 
-	//"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 	"github.com/opencord/voltha-protos/v4/go/common"
+	//"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	//"github.com/opencord/voltha-protos/v4/go/openflow_13"
 	//"github.com/opencord/voltha-protos/v4/go/voltha"
diff --git a/internal/pkg/onuadaptercore/omci_onu_upgrade.go b/internal/pkg/onuadaptercore/omci_onu_upgrade.go
index b21291f..4f0ac62 100644
--- a/internal/pkg/onuadaptercore/omci_onu_upgrade.go
+++ b/internal/pkg/onuadaptercore/omci_onu_upgrade.go
@@ -29,7 +29,7 @@
 	"github.com/looplab/fsm"
 	"github.com/opencord/omci-lib-go"
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"github.com/opencord/voltha-protos/v4/go/voltha"
 )
 
diff --git a/internal/pkg/onuadaptercore/omci_self_test_handler.go b/internal/pkg/onuadaptercore/omci_self_test_handler.go
index 55116b6..2e968fb 100644
--- a/internal/pkg/onuadaptercore/omci_self_test_handler.go
+++ b/internal/pkg/onuadaptercore/omci_self_test_handler.go
@@ -23,7 +23,7 @@
 	"github.com/looplab/fsm"
 	"github.com/opencord/omci-lib-go"
 	"github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"github.com/opencord/voltha-protos/v4/go/extension"
 	"sync"
 	"time"
diff --git a/internal/pkg/onuadaptercore/omci_test_request.go b/internal/pkg/onuadaptercore/omci_test_request.go
index d11133e..f0b76dc 100644
--- a/internal/pkg/onuadaptercore/omci_test_request.go
+++ b/internal/pkg/onuadaptercore/omci_test_request.go
@@ -28,8 +28,8 @@
 	"github.com/opencord/omci-lib-go"
 	me "github.com/opencord/omci-lib-go/generated"
 
-	//"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	//"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	//ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	//"github.com/opencord/voltha-protos/v4/go/openflow_13"
 	//"github.com/opencord/voltha-protos/v4/go/voltha"
diff --git a/internal/pkg/onuadaptercore/omci_vlan_config.go b/internal/pkg/onuadaptercore/omci_vlan_config.go
index a18a216..d95a8a7 100644
--- a/internal/pkg/onuadaptercore/omci_vlan_config.go
+++ b/internal/pkg/onuadaptercore/omci_vlan_config.go
@@ -31,7 +31,7 @@
 	"github.com/looplab/fsm"
 	"github.com/opencord/omci-lib-go"
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	of "github.com/opencord/voltha-protos/v4/go/openflow_13"
 )
 
diff --git a/internal/pkg/onuadaptercore/onu_device_db.go b/internal/pkg/onuadaptercore/onu_device_db.go
index 46638e1..83f419b 100644
--- a/internal/pkg/onuadaptercore/onu_device_db.go
+++ b/internal/pkg/onuadaptercore/onu_device_db.go
@@ -25,7 +25,7 @@
 	"sync"
 
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 type meDbMap map[me.ClassID]map[uint16]me.AttributeValueMap
diff --git a/internal/pkg/onuadaptercore/onu_device_entry.go b/internal/pkg/onuadaptercore/onu_device_entry.go
index 6302bc7..a6a51e1 100644
--- a/internal/pkg/onuadaptercore/onu_device_entry.go
+++ b/internal/pkg/onuadaptercore/onu_device_entry.go
@@ -32,12 +32,12 @@
 	//"time"
 
 	"github.com/looplab/fsm"
-	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
 
-	//"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	//"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	//ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	//"github.com/opencord/voltha-protos/v4/go/openflow_13"
 	//"github.com/opencord/voltha-protos/v4/go/voltha"
diff --git a/internal/pkg/onuadaptercore/onu_image_status.go b/internal/pkg/onuadaptercore/onu_image_status.go
index 5e610ac..f36a44e 100755
--- a/internal/pkg/onuadaptercore/onu_image_status.go
+++ b/internal/pkg/onuadaptercore/onu_image_status.go
@@ -26,7 +26,7 @@
 
 	"github.com/opencord/omci-lib-go"
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"github.com/opencord/voltha-protos/v4/go/voltha"
 )
 
diff --git a/internal/pkg/onuadaptercore/onu_metrics_manager.go b/internal/pkg/onuadaptercore/onu_metrics_manager.go
index c9685de..3276882 100644
--- a/internal/pkg/onuadaptercore/onu_metrics_manager.go
+++ b/internal/pkg/onuadaptercore/onu_metrics_manager.go
@@ -28,9 +28,9 @@
 	"github.com/looplab/fsm"
 	"github.com/opencord/omci-lib-go"
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"github.com/opencord/voltha-protos/v4/go/voltha"
 )
 
@@ -330,6 +330,13 @@
 			log.Fields{"device-id": dh.deviceID, "service": baseKvStorePath})
 		return nil
 	}
+	// restore data from KV store
+	if err := metricsManager.restorePmData(ctx); err != nil {
+		logger.Errorw(ctx, "error restoring pm data", log.Fields{"err": err})
+		// we continue given that it does not effect the actual services for the ONU,
+		// but there may be some negative effect on PM collection (there may be some mismatch in
+		// the actual PM config and what is present on the device).
+	}
 
 	logger.Info(ctx, "init-onuMetricsManager completed", log.Fields{"device-id": dh.deviceID})
 	return &metricsManager
@@ -1116,13 +1123,6 @@
 // ** L2 PM FSM Handlers start **
 
 func (mm *onuMetricsManager) l2PMFsmStarting(ctx context.Context, e *fsm.Event) {
-	// restore data from KV store
-	if err := mm.restorePmData(ctx); err != nil {
-		logger.Errorw(ctx, "error restoring pm data", log.Fields{"err": err})
-		// we continue given that it does not effect the actual services for the ONU,
-		// but there may be some negative effect on PM collection (there may be some mismatch in
-		// the actual PM config and what is present on the device).
-	}
 
 	// Loop through all the group metrics
 	// If it is a L2 PM Interval metric and it is enabled, then if it is not in the
diff --git a/internal/pkg/onuadaptercore/onu_uni_port.go b/internal/pkg/onuadaptercore/onu_uni_port.go
index 660c043..bfd52f9 100644
--- a/internal/pkg/onuadaptercore/onu_uni_port.go
+++ b/internal/pkg/onuadaptercore/onu_uni_port.go
@@ -22,12 +22,13 @@
 	"fmt"
 	"strconv"
 	"strings"
+	"time"
 
 	//"sync"
 	//"time"
 
-	//"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	//"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	vc "github.com/opencord/voltha-protos/v4/go/common"
 	of "github.com/opencord/voltha-protos/v4/go/openflow_13"
 	"github.com/opencord/voltha-protos/v4/go/voltha"
@@ -129,21 +130,29 @@
 			MaxSpeed:   1000,
 		},
 	}
-	if pUniPort != nil {
-		if err := apDeviceHandler.coreProxy.PortCreated(log.WithSpanFromContext(context.TODO(), ctx),
+	maxRetry := 3
+	retryCnt := 0
+	var err error
+	for retryCnt = 0; retryCnt < maxRetry; retryCnt++ {
+		if err = apDeviceHandler.coreProxy.PortCreated(log.WithSpanFromContext(context.TODO(), ctx),
 			apDeviceHandler.deviceID, pUniPort); err != nil {
-			logger.Fatalf(ctx, "adding-uni-port: create-VOLTHA-Port-failed-%s", err)
-			return err
+			logger.Errorf(ctx, "Device FSM: PortCreated-failed-%s, retrying after a delay", err)
+			// retry after a sleep
+			time.Sleep(2 * time.Second)
+		} else {
+			// success, break from retry loop
+			break
 		}
-		logger.Infow(ctx, "Voltha onuUniPort-added", log.Fields{
-			"device-id": apDeviceHandler.device.Id, "PortNo": oo.portNo})
-		oo.pPort = pUniPort
-		oo.operState = vc.OperStatus_DISCOVERED
-	} else {
-		logger.Warnw(ctx, "could not create Voltha UniPort", log.Fields{
-			"device-id": apDeviceHandler.device.Id, "PortNo": oo.portNo})
-		return fmt.Errorf("create Voltha UniPort %d failed on %s", oo.portNo, apDeviceHandler.device.Id)
 	}
+	if retryCnt == maxRetry { // maxed out..
+		logger.Errorf(ctx, "Device FSM: PortCreated-failed-%s", err)
+		return fmt.Errorf("device-fsm-port-create-failed-%s", err)
+	}
+	logger.Infow(ctx, "Voltha onuUniPort-added", log.Fields{
+		"device-id": apDeviceHandler.device.Id, "PortNo": oo.portNo})
+	oo.pPort = pUniPort
+	oo.operState = vc.OperStatus_DISCOVERED
+
 	return nil
 }
 
diff --git a/internal/pkg/onuadaptercore/onu_uni_tp.go b/internal/pkg/onuadaptercore/onu_uni_tp.go
index dff13d4..246bd94 100644
--- a/internal/pkg/onuadaptercore/onu_uni_tp.go
+++ b/internal/pkg/onuadaptercore/onu_uni_tp.go
@@ -19,21 +19,15 @@
 
 import (
 	"context"
-	"encoding/json"
-	"errors"
 	"fmt"
+	"github.com/opencord/voltha-protos/v4/go/tech_profile"
 	"strconv"
 	"strings"
 	"sync"
 
-	"github.com/opencord/voltha-lib-go/v4/pkg/db"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	tp "github.com/opencord/voltha-lib-go/v4/pkg/techprofile"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
-const cBasePathTechProfileKVStore = "%s/technology_profiles"
-
 //definitions for TechProfileProcessing - copied from OltAdapter:openolt_flowmgr.go
 //  could perhaps be defined more globally
 const (
@@ -98,7 +92,6 @@
 	baseDeviceHandler        *deviceHandler
 	deviceID                 string
 	tpProcMutex              sync.RWMutex
-	techProfileKVStore       *db.Backend
 	chTpConfigProcessingStep chan uint8
 	mapUniTpIndication       map[uniTP]*tTechProfileIndication //use pointer values to ease assignments to the map
 	mapPonAniConfig          map[uniTP]*tcontGemList           //per UNI: use pointer values to ease assignments to the map
@@ -136,12 +129,6 @@
 	onuTP.tpProfileExists = make(map[uniTP]bool)
 	onuTP.tpProfileResetting = make(map[uniTP]bool)
 	onuTP.mapRemoveGemEntry = make(map[uniTP]*gemPortParamStruct)
-	baseKvStorePath := fmt.Sprintf(cBasePathTechProfileKVStore, aDeviceHandler.pOpenOnuAc.cm.Backend.PathPrefix)
-	onuTP.techProfileKVStore = aDeviceHandler.setBackend(ctx, baseKvStorePath)
-	if onuTP.techProfileKVStore == nil {
-		logger.Errorw(ctx, "Can't access techProfileKVStore - no backend connection to service",
-			log.Fields{"device-id": aDeviceHandler.deviceID, "service": baseKvStorePath})
-	}
 
 	return &onuTP
 }
@@ -175,7 +162,7 @@
 // but take care on sequential background processing when needed (logical dependencies)
 //   use waitForTimeoutOrCompletion(ctx, chTpConfigProcessingStep, processingStep) for internal synchronization
 func (onuTP *onuUniTechProf) configureUniTp(ctx context.Context,
-	aUniID uint8, aPathString string, wg *sync.WaitGroup) {
+	aUniID uint8, aPathString string, tpInst tech_profile.TechProfileInstance, wg *sync.WaitGroup) {
 	defer wg.Done() //always decrement the waitGroup on return
 	logger.Debugw(ctx, "configure the Uni according to TpPath", log.Fields{
 		"device-id": onuTP.deviceID, "uni-id": aUniID, "path": aPathString})
@@ -185,14 +172,6 @@
 		logger.Errorw(ctx, "error-extracting-tp-id-from-tp-path", log.Fields{"device-id": onuTP.deviceID, "uni-id": aUniID, "path": aPathString})
 		return
 	}
-	if onuTP.techProfileKVStore == nil {
-		logger.Errorw(ctx, "techProfileKVStore not set - abort",
-			log.Fields{"device-id": onuTP.deviceID})
-		onuTP.mutexTPState.Lock()
-		defer onuTP.mutexTPState.Unlock()
-		onuTP.procResult[uniTpKey] = errors.New("techProfile config aborted: techProfileKVStore not set")
-		return
-	}
 
 	//ensure that the given uniID is available (configured) in the UniPort class (used for OMCI entities)
 	var pCurrentUniPort *onuUniPort
@@ -243,7 +222,7 @@
 
 	processingStep++
 	*/
-	go onuTP.readAniSideConfigFromTechProfile(ctx, aUniID, tpID, aPathString, processingStep)
+	go onuTP.readAniSideConfigFromTechProfile(ctx, aUniID, tpID, aPathString, tpInst, processingStep)
 	if !onuTP.waitForTimeoutOrCompletion(ctx, onuTP.chTpConfigProcessingStep, processingStep) {
 		//timeout or error detected
 		onuTP.mutexTPState.RLock()
@@ -330,9 +309,8 @@
 /* internal methods *********************/
 // nolint: gocyclo
 func (onuTP *onuUniTechProf) readAniSideConfigFromTechProfile(
-	ctx context.Context, aUniID uint8, aTpID uint8, aPathString string, aProcessingStep uint8) {
-	var tpInst tp.TechProfile
-
+	ctx context.Context, aUniID uint8, aTpID uint8, aPathString string, tpInst tech_profile.TechProfileInstance, aProcessingStep uint8) {
+	var err error
 	//store profile type and identifier for later usage within the OMCI identifier and possibly ME setup
 	//pathstring is defined to be in the form of <ProfType>/<profID>/<Interface/../Identifier>
 	subStringSlice := strings.Split(aPathString, "/")
@@ -387,47 +365,16 @@
 			"profType": onuTP.mapUniTpIndication[uniTPKey].techProfileType,
 			"profID":   onuTP.mapUniTpIndication[uniTPKey].techProfileID})
 
-	Value, err := onuTP.techProfileKVStore.Get(ctx, aPathString)
-	if err == nil {
-		if Value != nil {
-			logger.Debugw(ctx, "tech-profile read",
-				log.Fields{"Key": Value.Key, "device-id": onuTP.deviceID})
-			tpTmpBytes, _ := kvstore.ToByte(Value.Value)
-
-			if err = json.Unmarshal(tpTmpBytes, &tpInst); err != nil {
-				logger.Errorw(ctx, "TechProf - Failed to unmarshal tech-profile into tpInst",
-					log.Fields{"error": err, "device-id": onuTP.deviceID})
-				onuTP.chTpConfigProcessingStep <- 0 //error indication
-				return
-			}
-			logger.Debugw(ctx, "TechProf - tpInst", log.Fields{"tpInst": tpInst})
-			// access examples
-			logger.Debugw(ctx, "TechProf content", log.Fields{"Name": tpInst.Name,
-				"MaxGemPayloadSize":                tpInst.InstanceCtrl.MaxGemPayloadSize,
-				"DownstreamGemDiscardmaxThreshold": tpInst.DownstreamGemPortAttributeList[0].DiscardConfig.MaxThreshold})
-		} else {
-			logger.Errorw(ctx, "No tech-profile found",
-				log.Fields{"path": aPathString, "device-id": onuTP.deviceID})
-			onuTP.chTpConfigProcessingStep <- 0 //error indication
-			return
-		}
-	} else {
-		logger.Errorw(ctx, "kvstore-get failed for path",
-			log.Fields{"path": aPathString, "device-id": onuTP.deviceID})
-		onuTP.chTpConfigProcessingStep <- 0 //error indication
-		return
-	}
-
 	//default start with 1Tcont profile, later perhaps extend to MultiTcontMultiGem
 	localMapGemPortParams := make(map[uint16]*gemPortParamStruct)
 	onuTP.mapPonAniConfig[uniTPKey] = &tcontGemList{tcontParamStruct{}, localMapGemPortParams}
 
 	//note: the code is currently restricted to one TCcont per Onu (index [0])
 	//get the relevant values from the profile and store to mapPonAniConfig
-	onuTP.mapPonAniConfig[uniTPKey].tcontParams.allocID = uint16(tpInst.UsScheduler.AllocID)
+	onuTP.mapPonAniConfig[uniTPKey].tcontParams.allocID = uint16(tpInst.UsScheduler.AllocId)
 	//maybe tCont scheduling not (yet) needed - just to basically have it for future
 	//  (would only be relevant in case of ONU-2G QOS configuration flexibility)
-	if tpInst.UsScheduler.QSchedPolicy == "StrictPrio" {
+	if tpInst.UsScheduler.QSchedPolicy == tech_profile.SchedulingPolicy_StrictPriority {
 		onuTP.mapPonAniConfig[uniTPKey].tcontParams.schedPolicy = 1 //for the moment fixed value acc. G.988 //TODO: defines!
 	} else {
 		//default profile defines "Hybrid" - which probably comes down to WRR with some weigthts for SP
@@ -446,37 +393,37 @@
 			loGemPortRead = true
 		}
 		//for all GemPorts we need to extend the mapGemPortParams
-		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportID)] = &gemPortParamStruct{}
+		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportId)] = &gemPortParamStruct{}
 
-		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportID)].gemPortID =
-			uint16(content.GemportID)
+		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportId)].gemPortID =
+			uint16(content.GemportId)
 		//direction can be correlated later with Downstream list,
 		//  for now just assume bidirectional (upstream never exists alone)
-		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportID)].direction = 3 //as defined in G.988
+		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportId)].direction = 3 //as defined in G.988
 		// expected Prio-Queue values 0..7 with 7 for highest PrioQueue, QueueIndex=Prio = 0..7
-		if content.PriorityQueue > 7 {
+		if content.PriorityQ > 7 {
 			logger.Errorw(ctx, "PonAniConfig reject on GemPortList - PrioQueue value invalid",
-				log.Fields{"device-id": onuTP.deviceID, "index": pos, "PrioQueue": content.PriorityQueue})
+				log.Fields{"device-id": onuTP.deviceID, "index": pos, "PrioQueue": content.PriorityQ})
 			//remove PonAniConfig  as done so far, delete map should be safe, even if not existing
 			delete(onuTP.mapPonAniConfig, uniTPKey)
 			onuTP.chTpConfigProcessingStep <- 0 //error indication
 			return
 		}
-		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportID)].prioQueueIndex =
-			uint8(content.PriorityQueue)
-		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportID)].pbitString =
+		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportId)].prioQueueIndex =
+			uint8(content.PriorityQ)
+		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportId)].pbitString =
 			strings.TrimPrefix(content.PbitMap, binaryStringPrefix)
 		if content.AesEncryption == "True" {
-			onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportID)].gemPortEncState = 1
+			onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportId)].gemPortEncState = 1
 		} else {
-			onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportID)].gemPortEncState = 0
+			onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportId)].gemPortEncState = 0
 		}
-		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportID)].discardPolicy =
-			content.DiscardPolicy
-		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportID)].queueSchedPolicy =
-			content.SchedulingPolicy
+		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportId)].discardPolicy =
+			content.DiscardPolicy.String()
+		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportId)].queueSchedPolicy =
+			content.SchedulingPolicy.String()
 		//'GemWeight' looks strange in default profile, for now we just copy the weight to first queue
-		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportID)].queueWeight =
+		onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[uint16(content.GemportId)].queueWeight =
 			uint8(content.Weight)
 	}
 
@@ -500,7 +447,7 @@
 		}
 		logger.Infow(ctx, "Gem Port is multicast", log.Fields{"isMulticast": isMulticast})
 		if isMulticast {
-			mcastGemID := uint16(downstreamContent.McastGemID)
+			mcastGemID := uint16(downstreamContent.MulticastGemId)
 			_, existing := onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[mcastGemID]
 			if existing {
 				//GEM port was previously configured, avoid setting multicast attributes
@@ -526,32 +473,32 @@
 				}
 
 				// expected Prio-Queue values 0..7 with 7 for highest PrioQueue, QueueIndex=Prio = 0..7
-				if downstreamContent.PriorityQueue > 7 {
+				if downstreamContent.PriorityQ > 7 {
 					logger.Errorw(ctx, "PonAniConfig reject on GemPortList - PrioQueue value invalid",
-						log.Fields{"device-id": onuTP.deviceID, "index": mcastGemID, "PrioQueue": downstreamContent.PriorityQueue})
+						log.Fields{"device-id": onuTP.deviceID, "index": mcastGemID, "PrioQueue": downstreamContent.PriorityQ})
 					//remove PonAniConfig  as done so far, delete map should be safe, even if not existing
 					delete(onuTP.mapPonAniConfig, uniTPKey)
 					onuTP.chTpConfigProcessingStep <- 0 //error indication
 					return
 				}
 				onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[mcastGemID].prioQueueIndex =
-					uint8(downstreamContent.PriorityQueue)
+					uint8(downstreamContent.PriorityQ)
 				onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[mcastGemID].pbitString =
 					strings.TrimPrefix(downstreamContent.PbitMap, binaryStringPrefix)
 
 				onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[mcastGemID].discardPolicy =
-					downstreamContent.DiscardPolicy
+					downstreamContent.DiscardPolicy.String()
 				onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[mcastGemID].queueSchedPolicy =
-					downstreamContent.SchedulingPolicy
+					downstreamContent.SchedulingPolicy.String()
 				//'GemWeight' looks strange in default profile, for now we just copy the weight to first queue
 				onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[mcastGemID].queueWeight =
 					uint8(downstreamContent.Weight)
 
 				onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[mcastGemID].isMulticast = isMulticast
 				onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[mcastGemID].multicastGemPortID =
-					uint16(downstreamContent.McastGemID)
-				onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[mcastGemID].staticACL = downstreamContent.SControlList
-				onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[mcastGemID].dynamicACL = downstreamContent.DControlList
+					uint16(downstreamContent.MulticastGemId)
+				onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[mcastGemID].staticACL = downstreamContent.StaticAccessControlList
+				onuTP.mapPonAniConfig[uniTPKey].mapGemPortParams[mcastGemID].dynamicACL = downstreamContent.DynamicAccessControlList
 			}
 		}
 	}
diff --git a/internal/pkg/onuadaptercore/openonu.go b/internal/pkg/onuadaptercore/openonu.go
index b291b65..2c77aa2 100644
--- a/internal/pkg/onuadaptercore/openonu.go
+++ b/internal/pkg/onuadaptercore/openonu.go
@@ -24,14 +24,14 @@
 	"sync"
 	"time"
 
-	conf "github.com/opencord/voltha-lib-go/v4/pkg/config"
+	conf "github.com/opencord/voltha-lib-go/v5/pkg/config"
 
 	"github.com/golang/protobuf/ptypes"
-	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v4/pkg/events/eventif"
-	"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/events/eventif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"github.com/opencord/voltha-protos/v4/go/extension"
 	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	"github.com/opencord/voltha-protos/v4/go/openflow_13"
@@ -305,6 +305,12 @@
 	return fmt.Errorf(fmt.Sprintf("handler-not-found-%s", targetDevice))
 }
 
+//Process_tech_profile_instance_request not implemented
+func (oo *OpenONUAC) Process_tech_profile_instance_request(ctx context.Context, msg *ic.InterAdapterTechProfileInstanceRequestMessage) *ic.InterAdapterTechProfileDownloadMessage {
+	logger.Error(ctx, "unImplemented")
+	return nil
+}
+
 //Adapter_descriptor not implemented
 func (oo *OpenONUAC) Adapter_descriptor(ctx context.Context) error {
 	return errors.New("unImplemented")
diff --git a/internal/pkg/onuadaptercore/openonuimpl.go b/internal/pkg/onuadaptercore/openonuimpl.go
index 1230a01..8e6e187 100644
--- a/internal/pkg/onuadaptercore/openonuimpl.go
+++ b/internal/pkg/onuadaptercore/openonuimpl.go
@@ -20,7 +20,7 @@
 import (
 	"context"
 	"errors"
-	//"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	//"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 /*
diff --git a/internal/pkg/onuadaptercore/uniportadmin.go b/internal/pkg/onuadaptercore/uniportadmin.go
index fea1838..0c22500 100644
--- a/internal/pkg/onuadaptercore/uniportadmin.go
+++ b/internal/pkg/onuadaptercore/uniportadmin.go
@@ -27,7 +27,7 @@
 
 	"github.com/opencord/omci-lib-go"
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	//ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	//"github.com/opencord/voltha-protos/v4/go/openflow_13"
 )
diff --git a/internal/pkg/onuadaptercore/uniportstatus.go b/internal/pkg/onuadaptercore/uniportstatus.go
index 1674b52..fe9af4c 100644
--- a/internal/pkg/onuadaptercore/uniportstatus.go
+++ b/internal/pkg/onuadaptercore/uniportstatus.go
@@ -21,7 +21,7 @@
 	"context"
 	"github.com/opencord/omci-lib-go"
 	me "github.com/opencord/omci-lib-go/generated"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"github.com/opencord/voltha-protos/v4/go/extension"
 	"time"
 )
diff --git a/pkg/mocks/common.go b/pkg/mocks/common.go
index e469948..72a4c30 100644
--- a/pkg/mocks/common.go
+++ b/pkg/mocks/common.go
@@ -18,7 +18,7 @@
 package mocks
 
 import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 func init() {
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/adapter_proxy.go
deleted file mode 100644
index 9ade0d1..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/adapter_proxy.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package common
-
-import (
-	"context"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db"
-
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
-	"github.com/golang/protobuf/ptypes/any"
-	"github.com/google/uuid"
-	"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
-)
-
-type AdapterProxy struct {
-	kafkaICProxy kafka.InterContainerProxy
-	coreTopic    string
-	endpointMgr  kafka.EndpointManager
-}
-
-func NewAdapterProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, coreTopic string, backend *db.Backend) *AdapterProxy {
-	proxy := AdapterProxy{
-		kafkaICProxy: kafkaProxy,
-		coreTopic:    coreTopic,
-		endpointMgr:  kafka.NewEndpointManager(backend),
-	}
-	logger.Debugw(ctx, "topics", log.Fields{"core": proxy.coreTopic})
-	return &proxy
-}
-
-func (ap *AdapterProxy) SendInterAdapterMessage(ctx context.Context,
-	msg proto.Message,
-	msgType ic.InterAdapterMessageType_Types,
-	fromAdapter string,
-	toAdapter string,
-	toDeviceId string,
-	proxyDeviceId string,
-	messageId string) error {
-	logger.Debugw(ctx, "sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
-		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
-
-	//Marshal the message
-	var marshalledMsg *any.Any
-	var err error
-	if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
-		logger.Warnw(ctx, "cannot-marshal-msg", log.Fields{"error": err})
-		return err
-	}
-
-	// Set up the required rpc arguments
-	endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
-	if err != nil {
-		return err
-	}
-
-	//Build the inter adapter message
-	header := &ic.InterAdapterHeader{
-		Type:          msgType,
-		FromTopic:     fromAdapter,
-		ToTopic:       string(endpoint),
-		ToDeviceId:    toDeviceId,
-		ProxyDeviceId: proxyDeviceId,
-	}
-	if messageId != "" {
-		header.Id = messageId
-	} else {
-		header.Id = uuid.New().String()
-	}
-	header.Timestamp = ptypes.TimestampNow()
-	iaMsg := &ic.InterAdapterMessage{
-		Header: header,
-		Body:   marshalledMsg,
-	}
-	args := make([]*kafka.KVArg, 1)
-	args[0] = &kafka.KVArg{
-		Key:   "msg",
-		Value: iaMsg,
-	}
-
-	topic := kafka.Topic{Name: string(endpoint)}
-	replyToTopic := kafka.Topic{Name: fromAdapter}
-	rpc := "process_inter_adapter_message"
-
-	// Add a indication in context to differentiate this Inter Adapter message during Span processing in Kafka IC proxy
-	ctx = context.WithValue(ctx, "inter-adapter-msg-type", msgType)
-	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
-	logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
-	return unPackResponse(ctx, rpc, "", success, result)
-}
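
For reference, a minimal usage sketch of the SendInterAdapterMessage helper whose v4 vendored copy is removed above. The call shape is taken from the deleted file; the message type is one defined in the v4 inter_container protos, the topic and device-id strings are placeholders, and the v5 copy now vendored instead is assumed to keep the same signature.

package example

import (
	"context"

	com "github.com/opencord/voltha-lib-go/v4/pkg/adapters/common"
	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
)

// sendTechProfileDownload shows the call shape of the v4 AdapterProxy helper deleted above.
// All string values below are placeholders, not values taken from this change.
func sendTechProfileDownload(ctx context.Context, ap *com.AdapterProxy, msg *ic.InterAdapterTechProfileDownloadMessage) error {
	return ap.SendInterAdapterMessage(ctx,
		msg, // payload, marshalled into the InterAdapterMessage body
		ic.InterAdapterMessageType_TECH_PROFILE_DOWNLOAD_REQUEST,
		"brcm_openomci_onu", // fromAdapter topic (placeholder)
		"openolt",           // toAdapter topic (placeholder)
		"onu-device-id",     // toDeviceId (placeholder)
		"olt-device-id",     // proxyDeviceId (placeholder)
		"")                  // empty messageId lets the proxy generate a UUID
}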
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go
deleted file mode 100644
index 98f0559..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go
+++ /dev/null
@@ -1,506 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package kvstore
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	v3Client "go.etcd.io/etcd/clientv3"
-
-	v3Concurrency "go.etcd.io/etcd/clientv3/concurrency"
-	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
-)
-
-// EtcdClient represents the Etcd KV store client
-type EtcdClient struct {
-	ectdAPI             *v3Client.Client
-	keyReservations     map[string]*v3Client.LeaseID
-	watchedChannels     sync.Map
-	keyReservationsLock sync.RWMutex
-	lockToMutexMap      map[string]*v3Concurrency.Mutex
-	lockToSessionMap    map[string]*v3Concurrency.Session
-	lockToMutexLock     sync.Mutex
-}
-
-// NewEtcdCustomClient returns a new client for the Etcd KV store allowing
-// the called to specify etcd client configuration
-func NewEtcdCustomClient(ctx context.Context, config *v3Client.Config) (*EtcdClient, error) {
-	c, err := v3Client.New(*config)
-	if err != nil {
-		logger.Error(ctx, err)
-		return nil, err
-	}
-
-	reservations := make(map[string]*v3Client.LeaseID)
-	lockMutexMap := make(map[string]*v3Concurrency.Mutex)
-	lockSessionMap := make(map[string]*v3Concurrency.Session)
-
-	return &EtcdClient{ectdAPI: c, keyReservations: reservations, lockToMutexMap: lockMutexMap,
-		lockToSessionMap: lockSessionMap}, nil
-}
-
-// NewEtcdClient returns a new client for the Etcd KV store
-func NewEtcdClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
-	logconfig := log.ConstructZapConfig(log.JSON, level, log.Fields{})
-
-	return NewEtcdCustomClient(
-		ctx,
-		&v3Client.Config{
-			Endpoints:   []string{addr},
-			DialTimeout: timeout,
-			LogConfig:   &logconfig})
-}
-
-// IsConnectionUp returns whether the connection to the Etcd KV store is up.  If a timeout occurs then
-// it is assumed the connection is down or unreachable.
-func (c *EtcdClient) IsConnectionUp(ctx context.Context) bool {
-	// Let's try to get a non existent key.  If the connection is up then there will be no error returned.
-	if _, err := c.Get(ctx, "non-existent-key"); err != nil {
-		return false
-	}
-	//cancel()
-	return true
-}
-
-// List returns an array of key-value pairs with key as a prefix.  Timeout defines how long the function will
-// wait for a response
-func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
-	resp, err := c.ectdAPI.Get(ctx, key, v3Client.WithPrefix())
-	if err != nil {
-		logger.Error(ctx, err)
-		return nil, err
-	}
-	m := make(map[string]*KVPair)
-	for _, ev := range resp.Kvs {
-		m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version)
-	}
-	return m, nil
-}
-
-// Get returns a key-value pair for a given key. Timeout defines how long the function will
-// wait for a response
-func (c *EtcdClient) Get(ctx context.Context, key string) (*KVPair, error) {
-
-	resp, err := c.ectdAPI.Get(ctx, key)
-
-	if err != nil {
-		logger.Error(ctx, err)
-		return nil, err
-	}
-	for _, ev := range resp.Kvs {
-		// Only one value is returned
-		return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version), nil
-	}
-	return nil, nil
-}
-
-// Put writes a key-value pair to the KV store.  Value can only be a string or []byte since the etcd API
-// accepts only a string as a value for a put operation. Timeout defines how long the function will
-// wait for a response
-func (c *EtcdClient) Put(ctx context.Context, key string, value interface{}) error {
-
-	// Validate that we can convert value to a string as etcd API expects a string
-	var val string
-	var er error
-	if val, er = ToString(value); er != nil {
-		return fmt.Errorf("unexpected-type-%T", value)
-	}
-
-	var err error
-	// Check if there is already a lease for this key - if there is then use it, otherwise a PUT will make
-	// that KV key permanent instead of automatically removing it after a lease expiration
-	c.keyReservationsLock.RLock()
-	leaseID, ok := c.keyReservations[key]
-	c.keyReservationsLock.RUnlock()
-	if ok {
-		_, err = c.ectdAPI.Put(ctx, key, val, v3Client.WithLease(*leaseID))
-	} else {
-		_, err = c.ectdAPI.Put(ctx, key, val)
-	}
-
-	if err != nil {
-		switch err {
-		case context.Canceled:
-			logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
-		case context.DeadlineExceeded:
-			logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err})
-		case v3rpcTypes.ErrEmptyKey:
-			logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
-		default:
-			logger.Warnw(ctx, "bad-endpoints", log.Fields{"error": err})
-		}
-		return err
-	}
-	return nil
-}
-
-// Delete removes a key from the KV store. Timeout defines how long the function will
-// wait for a response
-func (c *EtcdClient) Delete(ctx context.Context, key string) error {
-
-	// delete the key
-	if _, err := c.ectdAPI.Delete(ctx, key); err != nil {
-		logger.Errorw(ctx, "failed-to-delete-key", log.Fields{"key": key, "error": err})
-		return err
-	}
-	logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
-	return nil
-}
-
-func (c *EtcdClient) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
-
-	//delete the prefix
-	if _, err := c.ectdAPI.Delete(ctx, prefixKey, v3Client.WithPrefix()); err != nil {
-		logger.Errorw(ctx, "failed-to-delete-prefix-key", log.Fields{"key": prefixKey, "error": err})
-		return err
-	}
-	logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": prefixKey})
-	return nil
-}
-
-// Reserve is invoked to acquire a key and set it to a given value. Value can only be a string or []byte since
-// the etcd API accepts only a string.  Timeout defines how long the function will wait for a response.  TTL
-// defines how long that reservation is valid.  When TTL expires the key is unreserved by the KV store itself.
-// If the key is acquired then the value returned will be the value passed in.  If the key is already acquired
-// then the value assigned to that key will be returned.
-func (c *EtcdClient) Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error) {
-	// Validate that we can convert value to a string as etcd API expects a string
-	var val string
-	var er error
-	if val, er = ToString(value); er != nil {
-		return nil, fmt.Errorf("unexpected-type%T", value)
-	}
-
-	resp, err := c.ectdAPI.Grant(ctx, int64(ttl.Seconds()))
-	if err != nil {
-		logger.Error(ctx, err)
-		return nil, err
-	}
-	// Register the lease id
-	c.keyReservationsLock.Lock()
-	c.keyReservations[key] = &resp.ID
-	c.keyReservationsLock.Unlock()
-
-	// Revoke lease if reservation is not successful
-	reservationSuccessful := false
-	defer func() {
-		if !reservationSuccessful {
-			if err = c.ReleaseReservation(context.Background(), key); err != nil {
-				logger.Error(ctx, "cannot-release-lease")
-			}
-		}
-	}()
-
-	// Try to grap the Key with the above lease
-	c.ectdAPI.Txn(context.Background())
-	txn := c.ectdAPI.Txn(context.Background())
-	txn = txn.If(v3Client.Compare(v3Client.Version(key), "=", 0))
-	txn = txn.Then(v3Client.OpPut(key, val, v3Client.WithLease(resp.ID)))
-	txn = txn.Else(v3Client.OpGet(key))
-	result, er := txn.Commit()
-	if er != nil {
-		return nil, er
-	}
-
-	if !result.Succeeded {
-		// Verify whether we are already the owner of that Key
-		if len(result.Responses) > 0 &&
-			len(result.Responses[0].GetResponseRange().Kvs) > 0 {
-			kv := result.Responses[0].GetResponseRange().Kvs[0]
-			if string(kv.Value) == val {
-				reservationSuccessful = true
-				return value, nil
-			}
-			return kv.Value, nil
-		}
-	} else {
-		// Read the Key to ensure this is our Key
-		m, err := c.Get(ctx, key)
-		if err != nil {
-			return nil, err
-		}
-		if m != nil {
-			if m.Key == key && isEqual(m.Value, value) {
-				// My reservation is successful - register it.  For now, support is only for 1 reservation per key
-				// per session.
-				reservationSuccessful = true
-				return value, nil
-			}
-			// My reservation has failed.  Return the owner of that key
-			return m.Value, nil
-		}
-	}
-	return nil, nil
-}
-
-// ReleaseAllReservations releases all key reservations previously made (using Reserve API)
-func (c *EtcdClient) ReleaseAllReservations(ctx context.Context) error {
-	c.keyReservationsLock.Lock()
-	defer c.keyReservationsLock.Unlock()
-
-	for key, leaseID := range c.keyReservations {
-		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
-		if err != nil {
-			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key, "error": err})
-			return err
-		}
-		delete(c.keyReservations, key)
-	}
-	return nil
-}
-
-// ReleaseReservation releases reservation for a specific key.
-func (c *EtcdClient) ReleaseReservation(ctx context.Context, key string) error {
-	// Get the leaseid using the key
-	logger.Debugw(ctx, "Release-reservation", log.Fields{"key": key})
-	var ok bool
-	var leaseID *v3Client.LeaseID
-	c.keyReservationsLock.Lock()
-	defer c.keyReservationsLock.Unlock()
-	if leaseID, ok = c.keyReservations[key]; !ok {
-		return nil
-	}
-
-	if leaseID != nil {
-		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
-		if err != nil {
-			logger.Error(ctx, err)
-			return err
-		}
-		delete(c.keyReservations, key)
-	}
-	return nil
-}
-
-// RenewReservation renews a reservation.  A reservation will go stale after the specified TTL (Time To Live)
-// period specified when reserving the key
-func (c *EtcdClient) RenewReservation(ctx context.Context, key string) error {
-	// Get the leaseid using the key
-	var ok bool
-	var leaseID *v3Client.LeaseID
-	c.keyReservationsLock.RLock()
-	leaseID, ok = c.keyReservations[key]
-	c.keyReservationsLock.RUnlock()
-
-	if !ok {
-		return errors.New("key-not-reserved")
-	}
-
-	if leaseID != nil {
-		_, err := c.ectdAPI.KeepAliveOnce(ctx, *leaseID)
-		if err != nil {
-			logger.Errorw(ctx, "lease-may-have-expired", log.Fields{"error": err})
-			return err
-		}
-	} else {
-		return errors.New("lease-expired")
-	}
-	return nil
-}
-
-// Watch provides the watch capability on a given key.  It returns a channel onto which the callee needs to
-// listen to receive Events.
-func (c *EtcdClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
-	w := v3Client.NewWatcher(c.ectdAPI)
-	ctx, cancel := context.WithCancel(ctx)
-	var channel v3Client.WatchChan
-	if withPrefix {
-		channel = w.Watch(ctx, key, v3Client.WithPrefix())
-	} else {
-		channel = w.Watch(ctx, key)
-	}
-
-	// Create a new channel
-	ch := make(chan *Event, maxClientChannelBufferSize)
-
-	// Keep track of the created channels so they can be closed when required
-	channelMap := make(map[chan *Event]v3Client.Watcher)
-	channelMap[ch] = w
-
-	channelMaps := c.addChannelMap(key, channelMap)
-
-	// Changing the log field (from channelMaps) as the underlying logger cannot format the map of channels into a
-	// json format.
-	logger.Debugw(ctx, "watched-channels", log.Fields{"len": len(channelMaps)})
-	// Launch a go routine to listen for updates
-	go c.listenForKeyChange(ctx, channel, ch, cancel)
-
-	return ch
-
-}
-
-func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]v3Client.Watcher) []map[chan *Event]v3Client.Watcher {
-	var channels interface{}
-	var exists bool
-
-	if channels, exists = c.watchedChannels.Load(key); exists {
-		channels = append(channels.([]map[chan *Event]v3Client.Watcher), channelMap)
-	} else {
-		channels = []map[chan *Event]v3Client.Watcher{channelMap}
-	}
-	c.watchedChannels.Store(key, channels)
-
-	return channels.([]map[chan *Event]v3Client.Watcher)
-}
-
-func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]v3Client.Watcher {
-	var channels interface{}
-	var exists bool
-
-	if channels, exists = c.watchedChannels.Load(key); exists {
-		channels = append(channels.([]map[chan *Event]v3Client.Watcher)[:pos], channels.([]map[chan *Event]v3Client.Watcher)[pos+1:]...)
-		c.watchedChannels.Store(key, channels)
-	}
-
-	return channels.([]map[chan *Event]v3Client.Watcher)
-}
-
-func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]v3Client.Watcher, bool) {
-	var channels interface{}
-	var exists bool
-
-	channels, exists = c.watchedChannels.Load(key)
-
-	if channels == nil {
-		return nil, exists
-	}
-
-	return channels.([]map[chan *Event]v3Client.Watcher), exists
-}
-
-// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
-// may be multiple listeners on the same key.  The previously created channel serves as a key
-func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
-	// Get the array of channels mapping
-	var watchedChannels []map[chan *Event]v3Client.Watcher
-	var ok bool
-
-	if watchedChannels, ok = c.getChannelMaps(key); !ok {
-		logger.Warnw(ctx, "key-has-no-watched-channels", log.Fields{"key": key})
-		return
-	}
-	// Look for the channels
-	var pos = -1
-	for i, chMap := range watchedChannels {
-		if t, ok := chMap[ch]; ok {
-			logger.Debug(ctx, "channel-found")
-			// Close the etcd watcher before the client channel.  This should close the etcd channel as well
-			if err := t.Close(); err != nil {
-				logger.Errorw(ctx, "watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
-			}
-			pos = i
-			break
-		}
-	}
-
-	channelMaps, _ := c.getChannelMaps(key)
-	// Remove that entry if present
-	if pos >= 0 {
-		channelMaps = c.removeChannelMap(key, pos)
-	}
-	logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
-}
-
-func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
-	logger.Debug(ctx, "start-listening-on-channel ...")
-	defer cancel()
-	defer close(ch)
-	for resp := range channel {
-		for _, ev := range resp.Events {
-			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
-		}
-	}
-	logger.Debug(ctx, "stop-listening-on-channel ...")
-}
-
-func getEventType(event *v3Client.Event) int {
-	switch event.Type {
-	case v3Client.EventTypePut:
-		return PUT
-	case v3Client.EventTypeDelete:
-		return DELETE
-	}
-	return UNKNOWN
-}
-
-// Close closes the KV store client
-func (c *EtcdClient) Close(ctx context.Context) {
-	if err := c.ectdAPI.Close(); err != nil {
-		logger.Errorw(ctx, "error-closing-client", log.Fields{"error": err})
-	}
-}
-
-func (c *EtcdClient) addLockName(lockName string, lock *v3Concurrency.Mutex, session *v3Concurrency.Session) {
-	c.lockToMutexLock.Lock()
-	defer c.lockToMutexLock.Unlock()
-	c.lockToMutexMap[lockName] = lock
-	c.lockToSessionMap[lockName] = session
-}
-
-func (c *EtcdClient) deleteLockName(lockName string) {
-	c.lockToMutexLock.Lock()
-	defer c.lockToMutexLock.Unlock()
-	delete(c.lockToMutexMap, lockName)
-	delete(c.lockToSessionMap, lockName)
-}
-
-func (c *EtcdClient) getLock(lockName string) (*v3Concurrency.Mutex, *v3Concurrency.Session) {
-	c.lockToMutexLock.Lock()
-	defer c.lockToMutexLock.Unlock()
-	var lock *v3Concurrency.Mutex
-	var session *v3Concurrency.Session
-	if l, exist := c.lockToMutexMap[lockName]; exist {
-		lock = l
-	}
-	if s, exist := c.lockToSessionMap[lockName]; exist {
-		session = s
-	}
-	return lock, session
-}
-
-func (c *EtcdClient) AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error {
-	session, _ := v3Concurrency.NewSession(c.ectdAPI, v3Concurrency.WithContext(ctx))
-	mu := v3Concurrency.NewMutex(session, "/devicelock_"+lockName)
-	if err := mu.Lock(context.Background()); err != nil {
-		//cancel()
-		return err
-	}
-	c.addLockName(lockName, mu, session)
-	return nil
-}
-
-func (c *EtcdClient) ReleaseLock(lockName string) error {
-	lock, session := c.getLock(lockName)
-	var err error
-	if lock != nil {
-		if e := lock.Unlock(context.Background()); e != nil {
-			err = e
-		}
-	}
-	if session != nil {
-		if e := session.Close(); e != nil {
-			err = e
-		}
-	}
-	c.deleteLockName(lockName)
-
-	return err
-}
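
A minimal round-trip sketch of the kvstore client declared above, using only the constructor and methods shown in the deleted v4 file; the endpoint, key, value, and timeout are placeholders, and the v5 copy now vendored instead is assumed to expose the same API.

package example

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v4/pkg/log"
)

func kvRoundTrip(ctx context.Context) error {
	// NewEtcdClient(addr, timeout, logLevel) as declared in the deleted file.
	client, err := kvstore.NewEtcdClient(ctx, "etcd:2379", 5*time.Second, log.ErrorLevel)
	if err != nil {
		return err
	}
	defer client.Close(ctx)

	if err := client.Put(ctx, "service/voltha/example-key", "example-value"); err != nil {
		return err
	}
	// Get returns (nil, nil) when the key does not exist.
	kv, err := client.Get(ctx, "service/voltha/example-key")
	if err != nil || kv == nil {
		return err
	}
	_ = kv.Value
	return nil
}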
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/4QueueHybridProfileMap1.json b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/4QueueHybridProfileMap1.json
deleted file mode 100644
index d11f8e4..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/4QueueHybridProfileMap1.json
+++ /dev/null
@@ -1,141 +0,0 @@
- {
-  "name": "4QueueHybridProfileMap1",
-  "profile_type": "XPON",
-  "version": 1,
-  "num_gem_ports": 4,
-  "instance_control": {
-    "onu": "multi-instance",
-    "uni": "single-instance",
-    "max_gem_payload_size": "auto"
-  },
-  "us_scheduler": {
-    "additional_bw": "AdditionalBW_Auto",
-    "direction": "UPSTREAM",
-    "priority": 0,
-    "weight": 0,
-    "q_sched_policy": "Hybrid"
-  },
-  "ds_scheduler": {
-    "additional_bw": "AdditionalBW_Auto",
-    "direction": "DOWNSTREAM",
-    "priority": 0,
-    "weight": 0,
-    "q_sched_policy": "Hybrid"
-  },
-  "upstream_gem_port_attribute_list": [
-    {
-      "pbit_map": "0b00000101",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 4,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "max_threshold": 0,
-        "min_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00011010",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 3,
-      "weight": 75,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00100000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 2,
-      "weight": 0,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b11000000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 1,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    }
-  ],
-  "downstream_gem_port_attribute_list": [
-    {
-      "pbit_map": "0b00000101",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 4,
-      "weight": 10,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00011010",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 3,
-      "weight": 90,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00100000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 2,
-      "weight": 0,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b11000000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 1,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    }
-  ]
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/README.md b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/README.md
deleted file mode 100644
index 88d6564..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/README.md
+++ /dev/null
@@ -1,344 +0,0 @@
-Technology Profile Management
-Overview
-Technology profiles that are utilized by VOLTHA are stored in a prescribed structure in VOLTHA's key/value store, which is currently etcd. The key structure used to access technology profiles is /voltha/technology_profiles//; where TID is the numeric ID of the technology profile and TECHNOLOGY specifies the technology being utilized by the adapter, e.g. xgspon. While the TID key is a directory, the TECHNOLOGY key should be set to the JSON data that represents the technology profile values.
-
-NOTE: The content of a technology profile represents a contract between the technology profile definition and all adapters that consume that technology profile. The structure and content of the profiles are outside the scope of Technology Profile Management. Technology profile management only specifies the key/value structure in which profiles are stored.
-
-Example JSON :
-
-{
-  "name": "4QueueHybridProfileMap1",
-  "profile_type": "XPON",
-  "version": 1,
-  "num_gem_ports": 4,
-  "instance_control": {
-    "onu": "multi-instance",
-    "uni": "single-instance",
-    "max_gem_payload_size": "auto"
-  },
-  "us_scheduler": {
-    "additional_bw": "auto",
-    "direction": "UPSTREAM",
-    "priority": 0,
-    "weight": 0,
-    "q_sched_policy": "hybrid"
-  },
-  "ds_scheduler": {
-    "additional_bw": "auto",
-    "direction": "DOWNSTREAM",
-    "priority": 0,
-    "weight": 0,
-    "q_sched_policy": "hybrid"
-  },
-  "upstream_gem_port_attribute_list": [
-    {
-      "pbit_map": "0b00000101",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 4,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "max_threshold": 0,
-        "min_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00011010",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 3,
-      "weight": 75,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00100000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 2,
-      "weight": 0,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b11000000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 1,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    }
-  ],
-  "downstream_gem_port_attribute_list": [
-    {
-      "pbit_map": "0b00000101",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 4,
-      "weight": 10,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00011010",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 3,
-      "weight": 90,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00100000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 2,
-      "weight": 0,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b11000000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 1,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    }
-  ]
-}
-
-Creating Technology Profiles
-Technology profiles are a simple JSON object. This JSON object can be created using a variety of tools such as Vim, Emacs, or various IDEs. JQ can be a useful tool for validating a JSON object. Once a file is created with the JSON object it can be stored in VOLTHA key/value store using the standard etcd command line tool etcdctl or using an HTTP POST operation using Curl.
-
-Assuming you are in a standard VOLTHA deployment within a Kubernetes cluster you can access the etcd key/value store using kubectl via the PODs named etcd-cluster-0000, etcd-cluster-0001, or etcd-cluster-0002. For the examples in this document etcd-cluster-0000 will be used, but it really shouldn't matter which is used.
-
-ETCD version 3 is being used in techprofile module : Export this variable before using curl operation , export ETCDCTL_API=3 
-
-Assuming the Technology template is stored in a local file 4QueueHybridProfileMap1.json the following commands could be used to store or update the technical template into the proper location in the etcd key/value store:
-
-# Store a Technology template using etcdctl
-jq -c . 4QueueHybridProfileMap1.json | kubectl exec -i etcd-cluster-0000 -- etcdctl set service/voltha/technology_profiles/XGS-PON/64
-
-jq -c . 4QueueHybridProfileMap1.json |  etcdctl --endpoints=<ETCDIP>:2379 put service/voltha/technology_profiles/XGS-PON/64
-
-
-# Store a Technology template using curl
-curl -sSL -XPUT http://10.233.53.161:2379/v2/keys/service/voltha/technology_profiles/XGS-PON/64 -d value="$(jq -c . 4QueueHybridProfileMap1.json)"
-In the examples above, the command jq is used. This command can be installed using standard package management tools on most Linux systems. In the examples the "-c" option is used to compress the JSON. Using this tool is not necessary, and if you choose not to use the tool, you can replace "jq -c ," in the above examples with the "cat" command. More on jq can be found at https://stedolan.github.io/jq/.
-
-Listing Technical Profiles for a given Technology
-While both curl and etcdctl (via kubectl) can be used to list or view the available Technology profiles, etcdctl is easier, and thus will be used in the examples. For listing Technology profiles etcdctl ls is used. In can be used in conjunction with the -r option to recursively list profiles.
-
-
-#List Tech profile 
-etcdctl --endpoints=<EtcdIPAddres>:2379 get  service/voltha/technology_profiles/XGS-PON/64
-
-
-# Example output
-A specified Technology profile can be viewed with the etcdctl get command. (Again, jq is used for presentation purposes, and is not required)
-
-# Display a specified Technology profile, using jq to pretty print
-kubectl exec -i etcd-cluster-0000 -- etcdctl get service/voltha/technology_profiles/XGS-PON/64 | jq .
-
-etcdctl --endpoints=<ETCDIP>:2379 get  service/voltha/technology_profiles/XGS-PON/64
-# Example outpout
-service/voltha/technology_profiles/XGS-PON/64/uni-1
-{
-  "name": "4QueueHybridProfileMap1",
-  "profile_type": "XPON",
-  "version": 1,
-  "num_gem_ports": 4,
-  "instance_control": {
-    "onu": "multi-instance",
-    "uni": "single-instance",
-    "max_gem_payload_size": "auto"
-  },
-  "us_scheduler": {
-    "additional_bw": "auto",
-    "direction": "UPSTREAM",
-    "priority": 0,
-    "weight": 0,
-    "q_sched_policy": "hybrid"
-  },
-  "ds_scheduler": {
-    "additional_bw": "auto",
-    "direction": "DOWNSTREAM",
-    "priority": 0,
-    "weight": 0,
-    "q_sched_policy": "hybrid"
-  },
-  "upstream_gem_port_attribute_list": [
-    {
-      "pbit_map": "0b00000101",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 4,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "max_threshold": 0,
-        "min_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00011010",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 3,
-      "weight": 75,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00100000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 2,
-      "weight": 0,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b11000000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 1,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    }
-  ],
-  "downstream_gem_port_attribute_list": [
-    {
-      "pbit_map": "0b00000101",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 4,
-      "weight": 10,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00011010",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 3,
-      "weight": 90,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00100000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 2,
-      "weight": 0,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b11000000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 1,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    }
-  ]
-}
-
-# Display a specified Technology profile instance, using jq to pretty print
-kubectl exec -i etcd-cluster-0000 -- etcdctl get service/voltha/technology_profiles/XGS-PON/64/pon-{0}/onu-{1}/uni-{0} | jq .
-
-etcdctl --endpoints=<ETCDIP>:2379 get  service/voltha/technology_profiles/XGS-PON/64/pon-{0}/onu-{1}/uni-{0}
-# Example outpout
-service/voltha/technology_profiles/XGS-PON/64/pon-{0}/onu-{1}/uni-{0}
-{"name":"Default_1tcont_1gem_Profile","subscriber_identifier":"pon-{0}/onu-{1}/uni-{0}","profile_type":"XGS-PON","version":1,"num_gem_ports":1,"instance_control":{"ONU":"multi-instance","uni":"single-instance","max_gem_payload_size":"auto"},"us_scheduler":{"alloc_id":1024,"direction":"UPSTREAM","additional_bw":"AdditionalBW_BestEffort","priority":0,"weight":0,"q_sched_policy":"Hybrid"},"ds_scheduler":{"alloc_id":1024,"direction":"DOWNSTREAM","additional_bw":"AdditionalBW_BestEffort","priority":0,"weight":0,"q_sched_policy":"Hybrid"},"upstream_gem_port_attribute_list":[{"gemport_id":1024,"max_q_size":"auto","pbit_map":"0b11111111","aes_encryption":"True","scheduling_policy":"WRR","priority_q":0,"weight":0,"discard_policy":"TailDrop","discard_config":{"min_threshold":0,"max_threshold":0,"max_probability":0},"is_multicast":"","dynamic_access_control_list":"","static_access_control_list":"","multicast_gem_id":0}],"downstream_gem_port_attribute_list":[{"gemport_id":1024,"max_q_size":"auto","pbit_map":"0b11111111","aes_encryption":"True","scheduling_policy":"WRR","priority_q":0,"weight":0,"discard_policy":"TailDrop","discard_config":{"min_threshold":0,"max_threshold":0,"max_probability":0},"is_multicast":"","dynamic_access_control_list":"","static_access_control_list":"","multicast_gem_id":0}]}
-
-# Deleting Technology Profiles
-A technology profile or a technology profile tree can be removed using etcdctl rm.
-
-# Remove a specific technology profile
-kubectl exec -i etcd-cluster-0000 -- etcdctl rm /XGS-PON/64
-
-# Remove all technology profiles associated with Technology xgspon and ID 64(including the profile ID key)
-kubectl exec -i etcd-cluster-0000 -- etcdctl rm --dir -r /XGS-PON/64
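
The TP instance shown in the example output above is plain JSON, so it can be decoded directly. A self-contained sketch follows; the local struct mirrors only a handful of the keys from that output and is not the techprofile package's own TechProfile type.

package example

import "encoding/json"

// tpInstanceSubset mirrors a few keys of the TP instance JSON shown in the README example.
type tpInstanceSubset struct {
	Name                 string `json:"name"`
	SubscriberIdentifier string `json:"subscriber_identifier"`
	ProfileType          string `json:"profile_type"`
	NumGemPorts          uint32 `json:"num_gem_ports"`
	UsScheduler          struct {
		AllocID uint32 `json:"alloc_id"`
	} `json:"us_scheduler"`
}

func decodeTPInstance(raw []byte) (*tpInstanceSubset, error) {
	var tp tpInstanceSubset
	if err := json.Unmarshal(raw, &tp); err != nil {
		return nil, err
	}
	return &tp, nil
}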
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/SingleQueueEponProfile.json b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/SingleQueueEponProfile.json
deleted file mode 100644
index 00476a2..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/SingleQueueEponProfile.json
+++ /dev/null
@@ -1,61 +0,0 @@
-{
-    "name": "SingleQueueEponProfile",
-    "profile_type": "EPON",
-    "version": 1,
-    "num_gem_ports": 1,
-    "instance_control": {
-      "onu": "multi-instance",
-      "uni": "single-instance",
-      "max_gem_payload_size": "auto"
-    },
-    "epon_attribute": {
-        "package_type": "B"
-    },
-    "upstream_queue_attribute_list": [
-      {
-        "pbit_map": "0b11111111",
-        "aes_encryption": "False",
-        "traffic_type": "BE",
-        "unsolicited_grant_size": 0,
-        "nominal_interval": 0,
-        "tolerated_poll_jitter": 0,
-        "request_transmission_policy": 0,
-        "num_q_sets": 2,
-        "q_thresholds": {
-          "q_threshold1":5500,
-          "q_threshold2":0,
-          "q_threshold3":0,
-          "q_threshold4":0,
-          "q_threshold5":0,
-          "q_threshold6":0,
-          "q_threshold7":0
-        },
-        "scheduling_policy": "StrictPriority",
-        "priority_q": 4,
-        "weight": 0,
-        "discard_policy": "TailDrop",
-        "max_q_size": "auto",
-        "discard_config": {
-          "min_threshold": 0,
-          "max_threshold": 0,
-          "max_probability": 0
-        }
-      }
-    ],
-    "downstream_queue_attribute_list": [
-      {
-        "pbit_map": "0b11111111",
-        "aes_encryption": "True",
-        "scheduling_policy": "StrictPriority",
-        "priority_q": 4,
-        "weight": 0,
-        "discard_policy": "TailDrop",
-        "max_q_size": "auto",
-        "discard_config": {
-          "min_threshold": 0,
-          "max_threshold": 0,
-          "max_probability": 0
-        }
-      }
-    ]
-}
\ No newline at end of file
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/common.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/common.go
deleted file mode 100644
index 544c780..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/common.go
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright 2020-present Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package techprofile
-
-import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-)
-
-var logger log.CLogger
-
-func init() {
-	// Setup this package so that it's log level can be modified at run time
-	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
-	if err != nil {
-		panic(err)
-	}
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/config.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/config.go
deleted file mode 100644
index 438ea4a..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/config.go
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Copyright 2019-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package techprofile
-
-import (
-	"fmt"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db"
-	"time"
-)
-
-// tech profile default constants
-const (
-	defaultTechProfileName        = "Default_1tcont_1gem_Profile"
-	DEFAULT_TECH_PROFILE_TABLE_ID = 64
-	defaultVersion                = 1.0
-	defaultLogLevel               = 0
-	defaultGemportsCount          = 1
-	defaultPbits                  = "0b11111111"
-
-	defaultKVStoreTimeout = 5 * time.Second //in seconds
-
-	// Tech profile path prefix in kv store (for the TP template)
-	// NOTE that this hardcoded to service/voltha as the TP template is shared across stacks
-	defaultTpKvPathPrefix = "service/voltha/technology_profiles"
-
-	// Tech profile path prefix in kv store (for TP instances)
-	defaultKVPathPrefix = "%s/technology_profiles"
-
-	// Tech profile path in kv store
-	defaultTechProfileKVPath = "%s/%d" // <technology>/<tech_profile_tableID>
-
-	// Tech profile instance path in kv store
-	// Format: <technology>/<tech_profile_tableID>/<uni_port_name>
-	defaultTPInstanceKVPath = "%s/%d/%s"
-)
-
-//Tech-Profile JSON String Keys
-// NOTE: Tech profile templeate JSON file should comply with below keys
-const (
-	NAME                               = "name"
-	PROFILE_TYPE                       = "profile_type"
-	VERSION                            = "version"
-	NUM_GEM_PORTS                      = "num_gem_ports"
-	INSTANCE_CONTROL                   = "instance_control"
-	US_SCHEDULER                       = "us_scheduler"
-	DS_SCHEDULER                       = "ds_scheduler"
-	UPSTREAM_GEM_PORT_ATTRIBUTE_LIST   = "upstream_gem_port_attribute_list"
-	DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST = "downstream_gem_port_attribute_list"
-	ONU                                = "onu"
-	UNI                                = "uni"
-	MAX_GEM_PAYLOAD_SIZE               = "max_gem_payload_size"
-	DIRECTION                          = "direction"
-	ADDITIONAL_BW                      = "additional_bw"
-	PRIORITY                           = "priority"
-	Q_SCHED_POLICY                     = "q_sched_policy"
-	WEIGHT                             = "weight"
-	PBIT_MAP                           = "pbit_map"
-	DISCARD_CONFIG                     = "discard_config"
-	MAX_THRESHOLD                      = "max_threshold"
-	MIN_THRESHOLD                      = "min_threshold"
-	MAX_PROBABILITY                    = "max_probability"
-	DISCARD_POLICY                     = "discard_policy"
-	PRIORITY_Q                         = "priority_q"
-	SCHEDULING_POLICY                  = "scheduling_policy"
-	MAX_Q_SIZE                         = "max_q_size"
-	AES_ENCRYPTION                     = "aes_encryption"
-	// String Keys for EPON
-	EPON_ATTRIBUTE              = "epon_attribute"
-	PACKAGE_TYPE                = "package_type"
-	TRAFFIC_TYPE                = "traffic type"
-	UNSOLICITED_GRANT_SIZE      = "unsolicited_grant_size"
-	NOMINAL_INTERVAL            = "nominal_interval"
-	TOLERATED_POLL_JITTER       = "tolerated_poll_jitter"
-	REQUEST_TRANSMISSION_POLICY = "request_transmission_policy"
-	NUM_Q_SETS                  = "num_q_sets"
-	Q_THRESHOLDS                = "q_thresholds"
-	Q_THRESHOLD1                = "q_threshold1"
-	Q_THRESHOLD2                = "q_threshold2"
-	Q_THRESHOLD3                = "q_threshold3"
-	Q_THRESHOLD4                = "q_threshold4"
-	Q_THRESHOLD5                = "q_threshold5"
-	Q_THRESHOLD6                = "q_threshold6"
-	Q_THRESHOLD7                = "q_threshold7"
-)
-
-// TechprofileFlags represents the set of configurations used
-type TechProfileFlags struct {
-	KVStoreAddress        string
-	KVStoreType           string
-	KVStoreTimeout        time.Duration
-	KVBackend             *db.Backend // this is the backend used to store TP instances
-	DefaultTpKVBackend    *db.Backend // this is the backend used to read the TP profile
-	TPKVPathPrefix        string
-	defaultTpKvPathPrefix string
-	TPFileKVPath          string
-	TPInstanceKVPath      string
-	DefaultTPName         string
-	TPVersion             int
-	NumGemPorts           uint32
-	DefaultPbits          []string
-	LogLevel              int
-	DefaultTechProfileID  uint32
-	DefaultNumGemPorts    uint32
-}
-
-func NewTechProfileFlags(KVStoreType string, KVStoreAddress string, basePathKvStore string) *TechProfileFlags {
-	// initialize with default values
-	var techProfileFlags = TechProfileFlags{
-		KVBackend:             nil,
-		KVStoreAddress:        KVStoreAddress,
-		KVStoreType:           KVStoreType,
-		KVStoreTimeout:        defaultKVStoreTimeout,
-		DefaultTPName:         defaultTechProfileName,
-		TPKVPathPrefix:        fmt.Sprintf(defaultKVPathPrefix, basePathKvStore),
-		defaultTpKvPathPrefix: defaultTpKvPathPrefix,
-		TPVersion:             defaultVersion,
-		TPFileKVPath:          defaultTechProfileKVPath,
-		TPInstanceKVPath:      defaultTPInstanceKVPath,
-		DefaultTechProfileID:  DEFAULT_TECH_PROFILE_TABLE_ID,
-		DefaultNumGemPorts:    defaultGemportsCount,
-		DefaultPbits:          []string{defaultPbits},
-		LogLevel:              defaultLogLevel,
-	}
-
-	return &techProfileFlags
-}
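
A small sketch of building the tech-profile configuration with the constructor declared above; the store type, address, and base path are placeholders, and the v5 package now vendored instead is assumed to expose an equivalent constructor.

package example

import tp "github.com/opencord/voltha-lib-go/v4/pkg/techprofile"

func defaultTpFlags() *tp.TechProfileFlags {
	// Fills in the defaults listed above (TP table ID 64, one GEM port, pbit map 0b11111111, ...).
	return tp.NewTechProfileFlags("etcd", "etcd:2379", "service/voltha")
}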
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile.go
deleted file mode 100644
index 0f2741f..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile.go
+++ /dev/null
@@ -1,1455 +0,0 @@
-/*
- * Copyright 2019-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package techprofile
-
-import (
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"regexp"
-	"strconv"
-	"sync"
-	"time"
-
-	"github.com/opencord/voltha-lib-go/v4/pkg/db"
-
-	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
-	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
-)
-
-// Interface to pon resource manager APIs
-type iPonResourceMgr interface {
-	GetResourceID(ctx context.Context, IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
-	FreeResourceID(ctx context.Context, IntfID uint32, ResourceType string, ReleaseContent []uint32) error
-	GetResourceTypeAllocID() string
-	GetResourceTypeGemPortID() string
-	GetResourceTypeOnuID() string
-	GetTechnology() string
-}
-
-type Direction int32
-
-const (
-	Direction_UPSTREAM      Direction = 0
-	Direction_DOWNSTREAM    Direction = 1
-	Direction_BIDIRECTIONAL Direction = 2
-)
-
-var Direction_name = map[Direction]string{
-	0: "UPSTREAM",
-	1: "DOWNSTREAM",
-	2: "BIDIRECTIONAL",
-}
-
-type SchedulingPolicy int32
-
-const (
-	SchedulingPolicy_WRR            SchedulingPolicy = 0
-	SchedulingPolicy_StrictPriority SchedulingPolicy = 1
-	SchedulingPolicy_Hybrid         SchedulingPolicy = 2
-)
-
-var SchedulingPolicy_name = map[SchedulingPolicy]string{
-	0: "WRR",
-	1: "StrictPriority",
-	2: "Hybrid",
-}
-
-type AdditionalBW int32
-
-const (
-	AdditionalBW_AdditionalBW_None       AdditionalBW = 0
-	AdditionalBW_AdditionalBW_NA         AdditionalBW = 1
-	AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
-	AdditionalBW_AdditionalBW_Auto       AdditionalBW = 3
-)
-
-var AdditionalBW_name = map[AdditionalBW]string{
-	0: "AdditionalBW_None",
-	1: "AdditionalBW_NA",
-	2: "AdditionalBW_BestEffort",
-	3: "AdditionalBW_Auto",
-}
-
-type DiscardPolicy int32
-
-const (
-	DiscardPolicy_TailDrop  DiscardPolicy = 0
-	DiscardPolicy_WTailDrop DiscardPolicy = 1
-	DiscardPolicy_Red       DiscardPolicy = 2
-	DiscardPolicy_WRed      DiscardPolicy = 3
-)
-
-var DiscardPolicy_name = map[DiscardPolicy]string{
-	0: "TailDrop",
-	1: "WTailDrop",
-	2: "Red",
-	3: "WRed",
-}
-
-// Required uniPortName format
-var uniPortNameFormat = regexp.MustCompile(`^olt-{[a-z0-9\-]+}/pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}$`)
-
-/*
-  type InferredAdditionBWIndication int32
-
-  const (
-	  InferredAdditionBWIndication_InferredAdditionBWIndication_None       InferredAdditionBWIndication = 0
-	  InferredAdditionBWIndication_InferredAdditionBWIndication_Assured    InferredAdditionBWIndication = 1
-	  InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort InferredAdditionBWIndication = 2
-  )
-
-  var InferredAdditionBWIndication_name = map[int32]string{
-	  0: "InferredAdditionBWIndication_None",
-	  1: "InferredAdditionBWIndication_Assured",
-	  2: "InferredAdditionBWIndication_BestEffort",
-  }
-*/
-// instance control defaults
-const (
-	defaultOnuInstance    = "multi-instance"
-	defaultUniInstance    = "single-instance"
-	defaultGemPayloadSize = "auto"
-)
-
-const MAX_GEM_PAYLOAD = "max_gem_payload_size"
-
-type InstanceControl struct {
-	Onu               string `json:"ONU"`
-	Uni               string `json:"uni"`
-	MaxGemPayloadSize string `json:"max_gem_payload_size"`
-}
-
-// default discard config constants
-const (
-	defaultMinThreshold   = 0
-	defaultMaxThreshold   = 0
-	defaultMaxProbability = 0
-)
-
-type DiscardConfig struct {
-	MinThreshold   int `json:"min_threshold"`
-	MaxThreshold   int `json:"max_threshold"`
-	MaxProbability int `json:"max_probability"`
-}
-
-// default scheduler contants
-const (
-	defaultAdditionalBw     = AdditionalBW_AdditionalBW_BestEffort
-	defaultPriority         = 0
-	defaultWeight           = 0
-	defaultQueueSchedPolicy = SchedulingPolicy_Hybrid
-)
-
-type Scheduler struct {
-	Direction    string `json:"direction"`
-	AdditionalBw string `json:"additional_bw"`
-	Priority     uint32 `json:"priority"`
-	Weight       uint32 `json:"weight"`
-	QSchedPolicy string `json:"q_sched_policy"`
-}
-
-// default GEM attribute constants
-const (
-	defaultAESEncryption     = "True"
-	defaultPriorityQueue     = 0
-	defaultQueueWeight       = 0
-	defaultMaxQueueSize      = "auto"
-	defaultdropPolicy        = DiscardPolicy_TailDrop
-	defaultSchedulePolicy    = SchedulingPolicy_WRR
-	defaultIsMulticast       = "False"
-	defaultAccessControlList = "224.0.0.0-239.255.255.255"
-	defaultMcastGemID        = 4069
-)
-
-type GemPortAttribute struct {
-	MaxQueueSize     string        `json:"max_q_size"`
-	PbitMap          string        `json:"pbit_map"`
-	AesEncryption    string        `json:"aes_encryption"`
-	SchedulingPolicy string        `json:"scheduling_policy"`
-	PriorityQueue    uint32        `json:"priority_q"`
-	Weight           uint32        `json:"weight"`
-	DiscardPolicy    string        `json:"discard_policy"`
-	DiscardConfig    DiscardConfig `json:"discard_config"`
-	IsMulticast      string        `json:"is_multicast"`
-	DControlList     string        `json:"dynamic_access_control_list"`
-	SControlList     string        `json:"static_access_control_list"`
-	McastGemID       uint32        `json:"multicast_gem_id"`
-}
-
-// Instance of Scheduler
-type IScheduler struct {
-	AllocID      uint32 `json:"alloc_id"`
-	Direction    string `json:"direction"`
-	AdditionalBw string `json:"additional_bw"`
-	Priority     uint32 `json:"priority"`
-	Weight       uint32 `json:"weight"`
-	QSchedPolicy string `json:"q_sched_policy"`
-}
-
-// Instance of GemPortAttribute
-type IGemPortAttribute struct {
-	GemportID        uint32        `json:"gemport_id"`
-	MaxQueueSize     string        `json:"max_q_size"`
-	PbitMap          string        `json:"pbit_map"`
-	AesEncryption    string        `json:"aes_encryption"`
-	SchedulingPolicy string        `json:"scheduling_policy"`
-	PriorityQueue    uint32        `json:"priority_q"`
-	Weight           uint32        `json:"weight"`
-	DiscardPolicy    string        `json:"discard_policy"`
-	DiscardConfig    DiscardConfig `json:"discard_config"`
-	IsMulticast      string        `json:"is_multicast"`
-	DControlList     string        `json:"dynamic_access_control_list"`
-	SControlList     string        `json:"static_access_control_list"`
-	McastGemID       uint32        `json:"multicast_gem_id"`
-}
-
-type TechProfileMgr struct {
-	config            *TechProfileFlags
-	resourceMgr       iPonResourceMgr
-	OnuIDMgmtLock     sync.RWMutex
-	GemPortIDMgmtLock sync.RWMutex
-	AllocIDMgmtLock   sync.RWMutex
-}
-type DefaultTechProfile struct {
-	Name                           string             `json:"name"`
-	ProfileType                    string             `json:"profile_type"`
-	Version                        int                `json:"version"`
-	NumGemPorts                    uint32             `json:"num_gem_ports"`
-	InstanceCtrl                   InstanceControl    `json:"instance_control"`
-	UsScheduler                    Scheduler          `json:"us_scheduler"`
-	DsScheduler                    Scheduler          `json:"ds_scheduler"`
-	UpstreamGemPortAttributeList   []GemPortAttribute `json:"upstream_gem_port_attribute_list"`
-	DownstreamGemPortAttributeList []GemPortAttribute `json:"downstream_gem_port_attribute_list"`
-}
-type TechProfile struct {
-	Name                           string              `json:"name"`
-	SubscriberIdentifier           string              `json:"subscriber_identifier"`
-	ProfileType                    string              `json:"profile_type"`
-	Version                        int                 `json:"version"`
-	NumGemPorts                    uint32              `json:"num_gem_ports"`
-	InstanceCtrl                   InstanceControl     `json:"instance_control"`
-	UsScheduler                    IScheduler          `json:"us_scheduler"`
-	DsScheduler                    IScheduler          `json:"ds_scheduler"`
-	UpstreamGemPortAttributeList   []IGemPortAttribute `json:"upstream_gem_port_attribute_list"`
-	DownstreamGemPortAttributeList []IGemPortAttribute `json:"downstream_gem_port_attribute_list"`
-}
-
-// QThresholds struct for EPON
-type QThresholds struct {
-	QThreshold1 uint32 `json:"q_threshold1"`
-	QThreshold2 uint32 `json:"q_threshold2"`
-	QThreshold3 uint32 `json:"q_threshold3"`
-	QThreshold4 uint32 `json:"q_threshold4"`
-	QThreshold5 uint32 `json:"q_threshold5"`
-	QThreshold6 uint32 `json:"q_threshold6"`
-	QThreshold7 uint32 `json:"q_threshold7"`
-}
-
-// UpstreamQueueAttribute struct for EPON
-type UpstreamQueueAttribute struct {
-	MaxQueueSize              string        `json:"max_q_size"`
-	PbitMap                   string        `json:"pbit_map"`
-	AesEncryption             string        `json:"aes_encryption"`
-	TrafficType               string        `json:"traffic_type"`
-	UnsolicitedGrantSize      uint32        `json:"unsolicited_grant_size"`
-	NominalInterval           uint32        `json:"nominal_interval"`
-	ToleratedPollJitter       uint32        `json:"tolerated_poll_jitter"`
-	RequestTransmissionPolicy uint32        `json:"request_transmission_policy"`
-	NumQueueSet               uint32        `json:"num_q_sets"`
-	QThresholds               QThresholds   `json:"q_thresholds"`
-	SchedulingPolicy          string        `json:"scheduling_policy"`
-	PriorityQueue             uint32        `json:"priority_q"`
-	Weight                    uint32        `json:"weight"`
-	DiscardPolicy             string        `json:"discard_policy"`
-	DiscardConfig             DiscardConfig `json:"discard_config"`
-}
-
-// Default EPON constants
-const (
-	defaultPakageType = "B"
-)
-const (
-	defaultTrafficType               = "BE"
-	defaultUnsolicitedGrantSize      = 0
-	defaultNominalInterval           = 0
-	defaultToleratedPollJitter       = 0
-	defaultRequestTransmissionPolicy = 0
-	defaultNumQueueSet               = 2
-)
-const (
-	defaultQThreshold1 = 5500
-	defaultQThreshold2 = 0
-	defaultQThreshold3 = 0
-	defaultQThreshold4 = 0
-	defaultQThreshold5 = 0
-	defaultQThreshold6 = 0
-	defaultQThreshold7 = 0
-)
-
-// DownstreamQueueAttribute struct for EPON
-type DownstreamQueueAttribute struct {
-	MaxQueueSize     string        `json:"max_q_size"`
-	PbitMap          string        `json:"pbit_map"`
-	AesEncryption    string        `json:"aes_encryption"`
-	SchedulingPolicy string        `json:"scheduling_policy"`
-	PriorityQueue    uint32        `json:"priority_q"`
-	Weight           uint32        `json:"weight"`
-	DiscardPolicy    string        `json:"discard_policy"`
-	DiscardConfig    DiscardConfig `json:"discard_config"`
-}
-
-// iUpstreamQueueAttribute struct for EPON
-type iUpstreamQueueAttribute struct {
-	GemportID                 uint32        `json:"q_id"`
-	MaxQueueSize              string        `json:"max_q_size"`
-	PbitMap                   string        `json:"pbit_map"`
-	AesEncryption             string        `json:"aes_encryption"`
-	TrafficType               string        `json:"traffic_type"`
-	UnsolicitedGrantSize      uint32        `json:"unsolicited_grant_size"`
-	NominalInterval           uint32        `json:"nominal_interval"`
-	ToleratedPollJitter       uint32        `json:"tolerated_poll_jitter"`
-	RequestTransmissionPolicy uint32        `json:"request_transmission_policy"`
-	NumQueueSet               uint32        `json:"num_q_sets"`
-	QThresholds               QThresholds   `json:"q_thresholds"`
-	SchedulingPolicy          string        `json:"scheduling_policy"`
-	PriorityQueue             uint32        `json:"priority_q"`
-	Weight                    uint32        `json:"weight"`
-	DiscardPolicy             string        `json:"discard_policy"`
-	DiscardConfig             DiscardConfig `json:"discard_config"`
-}
-
-// iDownstreamQueueAttribute struct for EPON
-type iDownstreamQueueAttribute struct {
-	GemportID        uint32        `json:"q_id"`
-	MaxQueueSize     string        `json:"max_q_size"`
-	PbitMap          string        `json:"pbit_map"`
-	AesEncryption    string        `json:"aes_encryption"`
-	SchedulingPolicy string        `json:"scheduling_policy"`
-	PriorityQueue    uint32        `json:"priority_q"`
-	Weight           uint32        `json:"weight"`
-	DiscardPolicy    string        `json:"discard_policy"`
-	DiscardConfig    DiscardConfig `json:"discard_config"`
-}
-
-// EponAttribute struct for EPON
-type EponAttribute struct {
-	PackageType string `json:"pakage_type"`
-}
-
-// DefaultEponProfile is the default tech profile template for EPON (counterpart of DefaultTechProfile)
-type DefaultEponProfile struct {
-	Name                         string                     `json:"name"`
-	ProfileType                  string                     `json:"profile_type"`
-	Version                      int                        `json:"version"`
-	NumGemPorts                  uint32                     `json:"num_gem_ports"`
-	InstanceCtrl                 InstanceControl            `json:"instance_control"`
-	EponAttribute                EponAttribute              `json:"epon_attribute"`
-	UpstreamQueueAttributeList   []UpstreamQueueAttribute   `json:"upstream_queue_attribute_list"`
-	DownstreamQueueAttributeList []DownstreamQueueAttribute `json:"downstream_queue_attribute_list"`
-}
-
-// EponProfile is the tech profile instance for EPON (counterpart of TechProfile)
-type EponProfile struct {
-	Name                         string                      `json:"name"`
-	SubscriberIdentifier         string                      `json:"subscriber_identifier"`
-	ProfileType                  string                      `json:"profile_type"`
-	Version                      int                         `json:"version"`
-	NumGemPorts                  uint32                      `json:"num_gem_ports"`
-	InstanceCtrl                 InstanceControl             `json:"instance_control"`
-	EponAttribute                EponAttribute               `json:"epon_attribute"`
-	AllocID                      uint32                      `json:"llid"`
-	UpstreamQueueAttributeList   []iUpstreamQueueAttribute   `json:"upstream_queue_attribute_list"`
-	DownstreamQueueAttributeList []iDownstreamQueueAttribute `json:"downstream_queue_attribute_list"`
-}
-
-const (
-	xgspon = "XGS-PON"
-	gpon   = "GPON"
-	epon   = "EPON"
-)
-
-func (t *TechProfileMgr) SetKVClient(ctx context.Context, pathPrefix string) *db.Backend {
-	kvClient, err := newKVClient(ctx, t.config.KVStoreType, t.config.KVStoreAddress, t.config.KVStoreTimeout)
-	if err != nil {
-		logger.Errorw(ctx, "failed-to-create-kv-client",
-			log.Fields{
-				"type": t.config.KVStoreType, "address": t.config.KVStoreAddress,
-				"timeout": t.config.KVStoreTimeout, "prefix": pathPrefix,
-				"error": err.Error(),
-			})
-		return nil
-	}
-	return &db.Backend{
-		Client:     kvClient,
-		StoreType:  t.config.KVStoreType,
-		Address:    t.config.KVStoreAddress,
-		Timeout:    t.config.KVStoreTimeout,
-		PathPrefix: pathPrefix}
-
-	/* TODO : Make sure direct call to NewBackend is working fine with backend , currently there is some
-		  issue between kv store and backend , core is not calling NewBackend directly
-	 kv := model.NewBackend(t.config.KVStoreType, t.config.KVStoreHost, t.config.KVStorePort,
-								  t.config.KVStoreTimeout,  kvStoreTechProfilePathPrefix)
-	*/
-}
-
-func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
-
-	logger.Infow(ctx, "kv-store", log.Fields{"storeType": storeType, "address": address})
-	switch storeType {
-	case "etcd":
-		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
-	}
-	return nil, errors.New("unsupported-kv-store")
-}
-
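// Editor's note: illustrative sketch, not part of the original vendored file.
// newKVClient only supports the "etcd" store type; anything else is rejected.
// The address and timeout below are assumptions made for this example only.
func exampleKVClient(ctx context.Context) {
	if _, err := newKVClient(ctx, "consul", "localhost:8500", 5*time.Second); err != nil {
		// err is "unsupported-kv-store" for any non-etcd store type
		logger.Infow(ctx, "kv-client-not-created", log.Fields{"error": err})
	}
}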
-func NewTechProfile(ctx context.Context, resourceMgr iPonResourceMgr, KVStoreType string, KVStoreAddress string, basePathKvStore string) (*TechProfileMgr, error) {
-	var techprofileObj TechProfileMgr
-	logger.Debug(ctx, "Initializing techprofile Manager")
-	techprofileObj.config = NewTechProfileFlags(KVStoreType, KVStoreAddress, basePathKvStore)
-	techprofileObj.config.KVBackend = techprofileObj.SetKVClient(ctx, techprofileObj.config.TPKVPathPrefix)
-	techprofileObj.config.DefaultTpKVBackend = techprofileObj.SetKVClient(ctx, techprofileObj.config.defaultTpKvPathPrefix)
-	if techprofileObj.config.KVBackend == nil {
-		logger.Error(ctx, "Failed to initialize KV backend")
-		return nil, errors.New("KV backend init failed")
-	}
-	techprofileObj.resourceMgr = resourceMgr
-	logger.Debug(ctx, "Initializing techprofile object instance success")
-	return &techprofileObj, nil
-}
-
-func (t *TechProfileMgr) GetTechProfileInstanceKVPath(ctx context.Context, techProfiletblID uint32, uniPortName string) string {
-	logger.Debugw(ctx, "get-tp-instance-kv-path", log.Fields{
-		"uniPortName": uniPortName,
-		"tpId":        techProfiletblID,
-	})
-	return fmt.Sprintf(t.config.TPInstanceKVPath, t.resourceMgr.GetTechnology(), techProfiletblID, uniPortName)
-}
-
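// Editor's note: illustrative sketch, not part of the original vendored file.
// Assuming the default TPInstanceKVPath template "%s/%d/%s" (an assumption for
// this example), GetTechProfileInstanceKVPath produces keys laid out like this:
func exampleTpInstanceKey() string {
	// e.g. "XGS-PON/64/pon-{0}/onu-{1}/uni-{0}", stored under the backend's PathPrefix
	return fmt.Sprintf("%s/%d/%s", "XGS-PON", 64, "pon-{0}/onu-{1}/uni-{0}")
}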
-func (t *TechProfileMgr) GetTPInstanceFromKVStore(ctx context.Context, techProfiletblID uint32, path string) (interface{}, error) {
-	var err error
-	var kvResult *kvstore.KVPair
-	var KvTpIns TechProfile
-	var KvEponIns EponProfile
-	var resPtr interface{}
-	// For example:
-	// tpInstPath like "XGS-PON/64/uni_port_name"
-	// is broken into ["XGS-PON" "64" ...]
-	pathSlice := regexp.MustCompile(`/`).Split(path, -1)
-	switch pathSlice[0] {
-	case xgspon, gpon:
-		resPtr = &KvTpIns
-	case epon:
-		resPtr = &KvEponIns
-	default:
-		logger.Errorw(ctx, "unknown-tech", log.Fields{"tech": pathSlice[0]})
-		return nil, fmt.Errorf("unknown-tech-%s", pathSlice[0])
-	}
-
-	kvResult, _ = t.config.KVBackend.Get(ctx, path)
-	if kvResult == nil {
-		logger.Infow(ctx, "tp-instance-not-found-on-kv", log.Fields{"key": path})
-		return nil, nil
-	} else {
-		if value, err := kvstore.ToByte(kvResult.Value); err == nil {
-			if err = json.Unmarshal(value, resPtr); err != nil {
-				logger.Errorw(ctx, "error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
-				return nil, errors.New("error-unmarshal-kv-result")
-			} else {
-				return resPtr, nil
-			}
-		}
-	}
-	return nil, err
-}
-
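// Editor's note: illustrative sketch, not part of the original vendored file.
// Callers have to type-assert the interface{} returned by GetTPInstanceFromKVStore
// according to the technology encoded at the start of the path.
func exampleLoadTpInstance(ctx context.Context, t *TechProfileMgr, path string) {
	inst, err := t.GetTPInstanceFromKVStore(ctx, 64, path)
	if err != nil || inst == nil {
		return // unmarshal failure, or instance not present on the KV store
	}
	switch tpInst := inst.(type) {
	case *TechProfile:
		logger.Debugw(ctx, "gpon-or-xgspon-tp-instance", log.Fields{"allocID": tpInst.UsScheduler.AllocID})
	case *EponProfile:
		logger.Debugw(ctx, "epon-tp-instance", log.Fields{"llid": tpInst.AllocID})
	}
}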
-func (t *TechProfileMgr) addTechProfInstanceToKVStore(ctx context.Context, techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
-	path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
-	logger.Debugw(ctx, "Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
-	tpInstanceJson, err := json.Marshal(*tpInstance)
-	if err == nil {
-		// Backend will convert JSON byte array into string format
-		logger.Debugw(ctx, "Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
-		err = t.config.KVBackend.Put(ctx, path, tpInstanceJson)
-	} else {
-		logger.Errorw(ctx, "Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
-	}
-	return err
-}
-
-func (t *TechProfileMgr) addEponProfInstanceToKVStore(ctx context.Context, techProfiletblID uint32, uniPortName string, tpInstance *EponProfile) error {
-	path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
-	logger.Debugw(ctx, "Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
-	tpInstanceJson, err := json.Marshal(*tpInstance)
-	if err == nil {
-		// Backend will convert JSON byte array into string format
-		logger.Debugw(ctx, "Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
-		err = t.config.KVBackend.Put(ctx, path, tpInstanceJson)
-	} else {
-		logger.Errorw(ctx, "Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
-	}
-	return err
-}
-
-func (t *TechProfileMgr) getTPFromKVStore(ctx context.Context, techProfiletblID uint32) *DefaultTechProfile {
-	var kvtechprofile DefaultTechProfile
-	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
-	logger.Debugw(ctx, "Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
-	kvresult, err := t.config.DefaultTpKVBackend.Get(ctx, key)
-	if err != nil {
-		logger.Errorw(ctx, "Error while fetching value from KV store", log.Fields{"key": key})
-		return nil
-	}
-	if kvresult != nil {
-		/* Backend will return Value in string format, needs to be converted to []byte before unmarshal */
-		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
-			if err = json.Unmarshal(value, &kvtechprofile); err != nil {
-				logger.Errorw(ctx, "Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
-				return nil
-			}
-
-			logger.Debugw(ctx, "Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
-			return &kvtechprofile
-		}
-	}
-	return nil
-}
-
-func (t *TechProfileMgr) getEponTPFromKVStore(ctx context.Context, techProfiletblID uint32) *DefaultEponProfile {
-	var kvtechprofile DefaultEponProfile
-	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
-	logger.Debugw(ctx, "Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
-	kvresult, err := t.config.DefaultTpKVBackend.Get(ctx, key)
-	if err != nil {
-		logger.Errorw(ctx, "Error while fetching value from KV store", log.Fields{"key": key})
-		return nil
-	}
-	if kvresult != nil {
-		/* Backend will return Value in string format, needs to be converted to []byte before unmarshal */
-		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
-			if err = json.Unmarshal(value, &kvtechprofile); err != nil {
-				logger.Errorw(ctx, "Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
-				return nil
-			}
-
-			logger.Debugw(ctx, "Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
-			return &kvtechprofile
-		}
-	}
-	return nil
-}
-
-func (t *TechProfileMgr) CreateTechProfInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfId uint32) (interface{}, error) {
-	var tpInstance *TechProfile
-	var tpEponInstance *EponProfile
-
-	logger.Infow(ctx, "creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
-
-	// Make sure the uniPortName matches the format pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
-	if !uniPortNameFormat.Match([]byte(uniPortName)) {
-		logger.Errorw(ctx, "uni-port-name-not-conforming-to-format", log.Fields{"uniPortName": uniPortName})
-		return nil, errors.New("uni-port-name-not-conforming-to-format")
-	}
-	tpInstancePath := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
-	// For example:
-	// tpInstPath like "XGS-PON/64/uni_port_name"
-	// is broken into ["XGS-PON" "64" ...]
-	pathSlice := regexp.MustCompile(`/`).Split(tpInstancePath, -1)
-	if pathSlice[0] == epon {
-		tp := t.getEponTPFromKVStore(ctx, techProfiletblID)
-		if tp != nil {
-			if err := t.validateInstanceControlAttr(ctx, tp.InstanceCtrl); err != nil {
-				logger.Error(ctx, "invalid-instance-ctrl-attr--using-default-tp")
-				tp = t.getDefaultEponProfile(ctx)
-			} else {
-				logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
-			}
-		} else {
-			logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
-			tp = t.getDefaultEponProfile(ctx)
-		}
-
-		if tpEponInstance = t.allocateEponTPInstance(ctx, uniPortName, tp, intfId, tpInstancePath); tpEponInstance == nil {
-			logger.Error(ctx, "tp-instance-allocation-failed")
-			return nil, errors.New("tp-instance-allocation-failed")
-		}
-		if err := t.addEponProfInstanceToKVStore(ctx, techProfiletblID, uniPortName, tpEponInstance); err != nil {
-			logger.Errorw(ctx, "error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
-			return nil, errors.New("error-adding-tp-to-kv-store")
-		}
-		logger.Infow(ctx, "tp-added-to-kv-store-successfully",
-			log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
-		return tpEponInstance, nil
-	} else {
-		tp := t.getTPFromKVStore(ctx, techProfiletblID)
-		if tp != nil {
-			if err := t.validateInstanceControlAttr(ctx, tp.InstanceCtrl); err != nil {
-				logger.Error(ctx, "invalid-instance-ctrl-attr--using-default-tp")
-				tp = t.getDefaultTechProfile(ctx)
-			} else {
-				logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
-			}
-		} else {
-			logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
-			tp = t.getDefaultTechProfile(ctx)
-		}
-
-		if tpInstance = t.allocateTPInstance(ctx, uniPortName, tp, intfId, tpInstancePath); tpInstance == nil {
-			logger.Error(ctx, "tp-instance-allocation-failed")
-			return nil, errors.New("tp-instance-allocation-failed")
-		}
-		if err := t.addTechProfInstanceToKVStore(ctx, techProfiletblID, uniPortName, tpInstance); err != nil {
-			logger.Errorw(ctx, "error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
-			return nil, errors.New("error-adding-tp-to-kv-store")
-		}
-		logger.Infow(ctx, "tp-added-to-kv-store-successfully",
-			log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
-		return tpInstance, nil
-	}
-}
-
-func (t *TechProfileMgr) DeleteTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string) error {
-	path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
-	return t.config.KVBackend.Delete(ctx, path)
-}
-
-func (t *TechProfileMgr) validateInstanceControlAttr(ctx context.Context, instCtl InstanceControl) error {
-	if instCtl.Onu != "single-instance" && instCtl.Onu != "multi-instance" {
-		logger.Errorw(ctx, "invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
-		return errors.New("invalid-onu-instance-ctl-attr")
-	}
-
-	if instCtl.Uni != "single-instance" && instCtl.Uni != "multi-instance" {
-		logger.Errorw(ctx, "invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
-		return errors.New("invalid-uni-instance-ctl-attr")
-	}
-
-	if instCtl.Uni == "multi-instance" {
-		logger.Error(ctx, "uni-multi-instance-tp-not-supported")
-		return errors.New("uni-multi-instance-tp-not-supported")
-	}
-
-	return nil
-}
-
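// Editor's note: illustrative sketch, not part of the original vendored file.
// validateInstanceControlAttr accepts only "single-instance"/"multi-instance"
// values and additionally rejects multi-instance control at the UNI level.
func exampleInstanceControlValidation(ctx context.Context, t *TechProfileMgr) {
	ok := InstanceControl{Onu: "multi-instance", Uni: "single-instance"}
	bad := InstanceControl{Onu: "multi-instance", Uni: "multi-instance"}
	logger.Debugw(ctx, "instance-control-check", log.Fields{
		"ok":  t.validateInstanceControlAttr(ctx, ok),  // nil
		"bad": t.validateInstanceControlAttr(ctx, bad), // uni-multi-instance-tp-not-supported
	})
}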
-func (t *TechProfileMgr) allocateTPInstance(ctx context.Context, uniPortName string, tp *DefaultTechProfile, intfId uint32, tpInstPath string) *TechProfile {
-
-	var usGemPortAttributeList []IGemPortAttribute
-	var dsGemPortAttributeList []IGemPortAttribute
-	var dsMulticastGemAttributeList []IGemPortAttribute
-	var dsUnicastGemAttributeList []IGemPortAttribute
-	var tcontIDs []uint32
-	var gemPorts []uint32
-	var err error
-
-	logger.Infow(ctx, "Allocating TechProfile instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
-
-	if tp.InstanceCtrl.Onu == "multi-instance" {
-		tcontIDs, err = t.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1)
-		if err != nil {
-			logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
-			return nil
-		}
-	} else { // "single-instance"
-		if tpInst, err := t.getSingleInstanceTp(ctx, tpInstPath); err != nil {
-			logger.Errorw(ctx, "Error getting single instance TP from KV store", log.Fields{"intfId": intfId})
-			return nil
-		} else if tpInst == nil {
-			// No "single-instance" tp found on any uni port for the given TP ID
-			// Allocate a new TcontID or AllocID
-			tcontIDs, err = t.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1)
-			if err != nil {
-				logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
-				return nil
-			}
-		} else {
-			// Use the alloc-id from the existing TpInstance
-			tcontIDs = append(tcontIDs, tpInst.UsScheduler.AllocID)
-		}
-	}
-	logger.Debugw(ctx, "Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
-	gemPorts, err = t.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts)
-	if err != nil {
-		logger.Errorw(ctx, "Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
-		return nil
-	}
-	logger.Infow(ctx, "Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
-	for index := 0; index < int(tp.NumGemPorts); index++ {
-		usGemPortAttributeList = append(usGemPortAttributeList,
-			IGemPortAttribute{GemportID: gemPorts[index],
-				MaxQueueSize:     tp.UpstreamGemPortAttributeList[index].MaxQueueSize,
-				PbitMap:          tp.UpstreamGemPortAttributeList[index].PbitMap,
-				AesEncryption:    tp.UpstreamGemPortAttributeList[index].AesEncryption,
-				SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
-				PriorityQueue:    tp.UpstreamGemPortAttributeList[index].PriorityQueue,
-				Weight:           tp.UpstreamGemPortAttributeList[index].Weight,
-				DiscardPolicy:    tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
-				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
-	}
-
-	logger.Info(ctx, "length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
-	//put multicast and unicast downstream GEM port attributes in different lists first
-	for index := 0; index < int(len(tp.DownstreamGemPortAttributeList)); index++ {
-		if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
-			dsMulticastGemAttributeList = append(dsMulticastGemAttributeList,
-				IGemPortAttribute{
-					McastGemID:       tp.DownstreamGemPortAttributeList[index].McastGemID,
-					MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
-					PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
-					AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
-					SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
-					PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
-					Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
-					DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
-					DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig,
-					IsMulticast:      tp.DownstreamGemPortAttributeList[index].IsMulticast,
-					DControlList:     tp.DownstreamGemPortAttributeList[index].DControlList,
-					SControlList:     tp.DownstreamGemPortAttributeList[index].SControlList})
-		} else {
-			dsUnicastGemAttributeList = append(dsUnicastGemAttributeList,
-				IGemPortAttribute{
-					MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
-					PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
-					AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
-					SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
-					PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
-					Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
-					DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
-					DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
-		}
-	}
-	//add unicast downstream GEM ports to dsGemPortAttributeList
-	for index := 0; index < int(tp.NumGemPorts); index++ {
-		dsGemPortAttributeList = append(dsGemPortAttributeList,
-			IGemPortAttribute{GemportID: gemPorts[index],
-				MaxQueueSize:     dsUnicastGemAttributeList[index].MaxQueueSize,
-				PbitMap:          dsUnicastGemAttributeList[index].PbitMap,
-				AesEncryption:    dsUnicastGemAttributeList[index].AesEncryption,
-				SchedulingPolicy: dsUnicastGemAttributeList[index].SchedulingPolicy,
-				PriorityQueue:    dsUnicastGemAttributeList[index].PriorityQueue,
-				Weight:           dsUnicastGemAttributeList[index].Weight,
-				DiscardPolicy:    dsUnicastGemAttributeList[index].DiscardPolicy,
-				DiscardConfig:    dsUnicastGemAttributeList[index].DiscardConfig})
-	}
-	//add multicast GEM ports to dsGemPortAttributeList afterwards
-	for k := range dsMulticastGemAttributeList {
-		dsGemPortAttributeList = append(dsGemPortAttributeList, dsMulticastGemAttributeList[k])
-	}
-
-	return &TechProfile{
-		SubscriberIdentifier: uniPortName,
-		Name:                 tp.Name,
-		ProfileType:          tp.ProfileType,
-		Version:              tp.Version,
-		NumGemPorts:          tp.NumGemPorts,
-		InstanceCtrl:         tp.InstanceCtrl,
-		UsScheduler: IScheduler{
-			AllocID:      tcontIDs[0],
-			Direction:    tp.UsScheduler.Direction,
-			AdditionalBw: tp.UsScheduler.AdditionalBw,
-			Priority:     tp.UsScheduler.Priority,
-			Weight:       tp.UsScheduler.Weight,
-			QSchedPolicy: tp.UsScheduler.QSchedPolicy},
-		DsScheduler: IScheduler{
-			AllocID:      tcontIDs[0],
-			Direction:    tp.DsScheduler.Direction,
-			AdditionalBw: tp.DsScheduler.AdditionalBw,
-			Priority:     tp.DsScheduler.Priority,
-			Weight:       tp.DsScheduler.Weight,
-			QSchedPolicy: tp.DsScheduler.QSchedPolicy},
-		UpstreamGemPortAttributeList:   usGemPortAttributeList,
-		DownstreamGemPortAttributeList: dsGemPortAttributeList}
-}
-
-// allocateEponTPInstance is the EPON variant of allocateTPInstance
-func (t *TechProfileMgr) allocateEponTPInstance(ctx context.Context, uniPortName string, tp *DefaultEponProfile, intfId uint32, tpInstPath string) *EponProfile {
-
-	var usQueueAttributeList []iUpstreamQueueAttribute
-	var dsQueueAttributeList []iDownstreamQueueAttribute
-	var tcontIDs []uint32
-	var gemPorts []uint32
-	var err error
-
-	logger.Infow(ctx, "Allocating EponProfile instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
-
-	if tp.InstanceCtrl.Onu == "multi-instance" {
-		if tcontIDs, err = t.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
-			logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
-			return nil
-		}
-	} else { // "single-instance"
-		if tpInst, err := t.getSingleInstanceEponTp(ctx, tpInstPath); err != nil {
-			logger.Errorw(ctx, "Error getting single instance EPON TP from KV store", log.Fields{"intfId": intfId})
-			return nil
-		} else if tpInst == nil {
-			// No "single-instance" tp found on any uni port for the given TP ID
-			// Allocate a new TcontID or AllocID
-			if tcontIDs, err = t.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
-				logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
-				return nil
-			}
-		} else {
-			// Use the alloc-id from the existing TpInstance
-			tcontIDs = append(tcontIDs, tpInst.AllocID)
-		}
-	}
-	logger.Debugw(ctx, "Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
-	if gemPorts, err = t.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
-		logger.Errorw(ctx, "Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
-		return nil
-	}
-	logger.Infow(ctx, "Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
-	for index := 0; index < int(tp.NumGemPorts); index++ {
-		usQueueAttributeList = append(usQueueAttributeList,
-			iUpstreamQueueAttribute{GemportID: gemPorts[index],
-				MaxQueueSize:              tp.UpstreamQueueAttributeList[index].MaxQueueSize,
-				PbitMap:                   tp.UpstreamQueueAttributeList[index].PbitMap,
-				AesEncryption:             tp.UpstreamQueueAttributeList[index].AesEncryption,
-				TrafficType:               tp.UpstreamQueueAttributeList[index].TrafficType,
-				UnsolicitedGrantSize:      tp.UpstreamQueueAttributeList[index].UnsolicitedGrantSize,
-				NominalInterval:           tp.UpstreamQueueAttributeList[index].NominalInterval,
-				ToleratedPollJitter:       tp.UpstreamQueueAttributeList[index].ToleratedPollJitter,
-				RequestTransmissionPolicy: tp.UpstreamQueueAttributeList[index].RequestTransmissionPolicy,
-				NumQueueSet:               tp.UpstreamQueueAttributeList[index].NumQueueSet,
-				QThresholds:               tp.UpstreamQueueAttributeList[index].QThresholds,
-				SchedulingPolicy:          tp.UpstreamQueueAttributeList[index].SchedulingPolicy,
-				PriorityQueue:             tp.UpstreamQueueAttributeList[index].PriorityQueue,
-				Weight:                    tp.UpstreamQueueAttributeList[index].Weight,
-				DiscardPolicy:             tp.UpstreamQueueAttributeList[index].DiscardPolicy,
-				DiscardConfig:             tp.UpstreamQueueAttributeList[index].DiscardConfig})
-	}
-
-	logger.Info(ctx, "length of DownstreamQueueAttributeList", len(tp.DownstreamQueueAttributeList))
-	for index := 0; index < int(tp.NumGemPorts); index++ {
-		dsQueueAttributeList = append(dsQueueAttributeList,
-			iDownstreamQueueAttribute{GemportID: gemPorts[index],
-				MaxQueueSize:     tp.DownstreamQueueAttributeList[index].MaxQueueSize,
-				PbitMap:          tp.DownstreamQueueAttributeList[index].PbitMap,
-				AesEncryption:    tp.DownstreamQueueAttributeList[index].AesEncryption,
-				SchedulingPolicy: tp.DownstreamQueueAttributeList[index].SchedulingPolicy,
-				PriorityQueue:    tp.DownstreamQueueAttributeList[index].PriorityQueue,
-				Weight:           tp.DownstreamQueueAttributeList[index].Weight,
-				DiscardPolicy:    tp.DownstreamQueueAttributeList[index].DiscardPolicy,
-				DiscardConfig:    tp.DownstreamQueueAttributeList[index].DiscardConfig})
-	}
-
-	return &EponProfile{
-		SubscriberIdentifier:         uniPortName,
-		Name:                         tp.Name,
-		ProfileType:                  tp.ProfileType,
-		Version:                      tp.Version,
-		NumGemPorts:                  tp.NumGemPorts,
-		InstanceCtrl:                 tp.InstanceCtrl,
-		EponAttribute:                tp.EponAttribute,
-		AllocID:                      tcontIDs[0],
-		UpstreamQueueAttributeList:   usQueueAttributeList,
-		DownstreamQueueAttributeList: dsQueueAttributeList}
-}
-
-// getSingleInstanceTp returns another TpInstance for an ONU on a different
-// uni port for the same TP ID, if it finds one, else nil.
-func (t *TechProfileMgr) getSingleInstanceTp(ctx context.Context, tpPath string) (*TechProfile, error) {
-	var tpInst TechProfile
-
-	// For example:
-	// tpPath like "service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}/uni-{1}"
-	// is broken into ["service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}" ""]
-	uniPathSlice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPath, 2)
-	kvPairs, _ := t.config.KVBackend.List(ctx, uniPathSlice[0])
-
-	// Find a valid TP Instance among all the UNIs of that ONU for the given TP ID
-	for keyPath, kvPair := range kvPairs {
-		if value, err := kvstore.ToByte(kvPair.Value); err == nil {
-			if err = json.Unmarshal(value, &tpInst); err != nil {
-				logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
-				return nil, errors.New("error-unmarshal-kv-pair")
-			} else {
-				logger.Debugw(ctx, "found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
-				return &tpInst, nil
-			}
-		}
-	}
-	return nil, nil
-}
-
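// Editor's note: illustrative sketch, not part of the original vendored file.
// It reproduces how the trailing "/uni-{n}" suffix is stripped so that every
// UNI of the same ONU can be listed when looking for an existing single-instance TP.
func exampleUniPathPrefix() string {
	tpPath := "service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}/uni-{1}"
	// returns "service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}",
	// the prefix handed to KVBackend.List
	return regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPath, 2)[0]
}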
-func (t *TechProfileMgr) getSingleInstanceEponTp(ctx context.Context, tpPath string) (*EponProfile, error) {
-	var tpInst EponProfile
-
-	// For example:
-	// tpPath like "service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}/uni-{1}"
-	// is broken into ["service/voltha/technology_profiles/xgspon/64/pon-{0}/onu-{1}" ""]
-	uniPathSlice := regexp.MustCompile(`/uni-{[0-9]+}$`).Split(tpPath, 2)
-	kvPairs, _ := t.config.KVBackend.List(ctx, uniPathSlice[0])
-
-	// Find a valid TP Instance among all the UNIs of that ONU for the given TP ID
-	for keyPath, kvPair := range kvPairs {
-		if value, err := kvstore.ToByte(kvPair.Value); err == nil {
-			if err = json.Unmarshal(value, &tpInst); err != nil {
-				logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
-				return nil, errors.New("error-unmarshal-kv-pair")
-			} else {
-				logger.Debugw(ctx, "found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
-				return &tpInst, nil
-			}
-		}
-	}
-	return nil, nil
-}
-
-func (t *TechProfileMgr) getDefaultTechProfile(ctx context.Context) *DefaultTechProfile {
-	var usGemPortAttributeList []GemPortAttribute
-	var dsGemPortAttributeList []GemPortAttribute
-
-	for _, pbit := range t.config.DefaultPbits {
-		logger.Debugw(ctx, "Creating GEM port", log.Fields{"pbit": pbit})
-		usGemPortAttributeList = append(usGemPortAttributeList,
-			GemPortAttribute{
-				MaxQueueSize:     defaultMaxQueueSize,
-				PbitMap:          pbit,
-				AesEncryption:    defaultAESEncryption,
-				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
-				PriorityQueue:    defaultPriorityQueue,
-				Weight:           defaultQueueWeight,
-				DiscardPolicy:    DiscardPolicy_name[defaultdropPolicy],
-				DiscardConfig: DiscardConfig{
-					MinThreshold:   defaultMinThreshold,
-					MaxThreshold:   defaultMaxThreshold,
-					MaxProbability: defaultMaxProbability}})
-		dsGemPortAttributeList = append(dsGemPortAttributeList,
-			GemPortAttribute{
-				MaxQueueSize:     defaultMaxQueueSize,
-				PbitMap:          pbit,
-				AesEncryption:    defaultAESEncryption,
-				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
-				PriorityQueue:    defaultPriorityQueue,
-				Weight:           defaultQueueWeight,
-				DiscardPolicy:    DiscardPolicy_name[defaultdropPolicy],
-				DiscardConfig: DiscardConfig{
-					MinThreshold:   defaultMinThreshold,
-					MaxThreshold:   defaultMaxThreshold,
-					MaxProbability: defaultMaxProbability},
-				IsMulticast:  defaultIsMulticast,
-				DControlList: defaultAccessControlList,
-				SControlList: defaultAccessControlList,
-				McastGemID:   defaultMcastGemID})
-	}
-	return &DefaultTechProfile{
-		Name:        t.config.DefaultTPName,
-		ProfileType: t.resourceMgr.GetTechnology(),
-		Version:     t.config.TPVersion,
-		NumGemPorts: uint32(len(usGemPortAttributeList)),
-		InstanceCtrl: InstanceControl{
-			Onu:               defaultOnuInstance,
-			Uni:               defaultUniInstance,
-			MaxGemPayloadSize: defaultGemPayloadSize},
-		UsScheduler: Scheduler{
-			Direction:    Direction_name[Direction_UPSTREAM],
-			AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
-			Priority:     defaultPriority,
-			Weight:       defaultWeight,
-			QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
-		DsScheduler: Scheduler{
-			Direction:    Direction_name[Direction_DOWNSTREAM],
-			AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
-			Priority:     defaultPriority,
-			Weight:       defaultWeight,
-			QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
-		UpstreamGemPortAttributeList:   usGemPortAttributeList,
-		DownstreamGemPortAttributeList: dsGemPortAttributeList}
-}
-
-// getDefaultEponProfile is the EPON variant of getDefaultTechProfile
-func (t *TechProfileMgr) getDefaultEponProfile(ctx context.Context) *DefaultEponProfile {
-
-	var usQueueAttributeList []UpstreamQueueAttribute
-	var dsQueueAttributeList []DownstreamQueueAttribute
-
-	for _, pbit := range t.config.DefaultPbits {
-		logger.Debugw(ctx, "Creating Queue", log.Fields{"pbit": pbit})
-		usQueueAttributeList = append(usQueueAttributeList,
-			UpstreamQueueAttribute{
-				MaxQueueSize:              defaultMaxQueueSize,
-				PbitMap:                   pbit,
-				AesEncryption:             defaultAESEncryption,
-				TrafficType:               defaultTrafficType,
-				UnsolicitedGrantSize:      defaultUnsolicitedGrantSize,
-				NominalInterval:           defaultNominalInterval,
-				ToleratedPollJitter:       defaultToleratedPollJitter,
-				RequestTransmissionPolicy: defaultRequestTransmissionPolicy,
-				NumQueueSet:               defaultNumQueueSet,
-				QThresholds: QThresholds{
-					QThreshold1: defaultQThreshold1,
-					QThreshold2: defaultQThreshold2,
-					QThreshold3: defaultQThreshold3,
-					QThreshold4: defaultQThreshold4,
-					QThreshold5: defaultQThreshold5,
-					QThreshold6: defaultQThreshold6,
-					QThreshold7: defaultQThreshold7},
-				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
-				PriorityQueue:    defaultPriorityQueue,
-				Weight:           defaultQueueWeight,
-				DiscardPolicy:    DiscardPolicy_name[defaultdropPolicy],
-				DiscardConfig: DiscardConfig{
-					MinThreshold:   defaultMinThreshold,
-					MaxThreshold:   defaultMaxThreshold,
-					MaxProbability: defaultMaxProbability}})
-		dsQueueAttributeList = append(dsQueueAttributeList,
-			DownstreamQueueAttribute{
-				MaxQueueSize:     defaultMaxQueueSize,
-				PbitMap:          pbit,
-				AesEncryption:    defaultAESEncryption,
-				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
-				PriorityQueue:    defaultPriorityQueue,
-				Weight:           defaultQueueWeight,
-				DiscardPolicy:    DiscardPolicy_name[defaultdropPolicy],
-				DiscardConfig: DiscardConfig{
-					MinThreshold:   defaultMinThreshold,
-					MaxThreshold:   defaultMaxThreshold,
-					MaxProbability: defaultMaxProbability}})
-	}
-	return &DefaultEponProfile{
-		Name:        t.config.DefaultTPName,
-		ProfileType: t.resourceMgr.GetTechnology(),
-		Version:     t.config.TPVersion,
-		NumGemPorts: uint32(len(usQueueAttributeList)),
-		InstanceCtrl: InstanceControl{
-			Onu:               defaultOnuInstance,
-			Uni:               defaultUniInstance,
-			MaxGemPayloadSize: defaultGemPayloadSize},
-		EponAttribute: EponAttribute{
-			PackageType: defaultPakageType},
-		UpstreamQueueAttributeList:   usQueueAttributeList,
-		DownstreamQueueAttributeList: dsQueueAttributeList}
-}
-
-func (t *TechProfileMgr) GetprotoBufParamValue(ctx context.Context, paramType string, paramKey string) int32 {
-	var result int32 = -1
-
-	if paramType == "direction" {
-		for key, val := range tp_pb.Direction_value {
-			if key == paramKey {
-				result = val
-			}
-		}
-	} else if paramType == "discard_policy" {
-		for key, val := range tp_pb.DiscardPolicy_value {
-			if key == paramKey {
-				result = val
-			}
-		}
-	} else if paramType == "sched_policy" {
-		for key, val := range tp_pb.SchedulingPolicy_value {
-			if key == paramKey {
-				logger.Debugw(ctx, "Got value in proto", log.Fields{"key": key, "value": val})
-				result = val
-			}
-		}
-	} else if paramType == "additional_bw" {
-		for key, val := range tp_pb.AdditionalBW_value {
-			if key == paramKey {
-				result = val
-			}
-		}
-	} else {
-		logger.Errorw(ctx, "Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
-		return -1
-	}
-	logger.Debugw(ctx, "Got value in proto", log.Fields{"key": paramKey, "value": result})
-	return result
-}
-
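// Editor's note: illustrative sketch, not part of the original vendored file.
// GetprotoBufParamValue maps the string names stored in a TP instance onto the
// corresponding voltha-protos enum values; it returns -1 for an unknown name.
func exampleProtoLookup(ctx context.Context, t *TechProfileMgr) {
	dir := t.GetprotoBufParamValue(ctx, "direction", "UPSTREAM")          // 0 (tp_pb.Direction_UPSTREAM)
	bad := t.GetprotoBufParamValue(ctx, "sched_policy", "NO-SUCH-POLICY") // -1
	logger.Debugw(ctx, "proto-param-lookup", log.Fields{"direction": dir, "bad": bad})
}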
-func (t *TechProfileMgr) GetUsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
-	dir := tp_pb.Direction(t.GetprotoBufParamValue(ctx, "direction", tpInstance.UsScheduler.Direction))
-	if dir == -1 {
-		logger.Errorf(ctx, "Error in getting proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
-		return nil, fmt.Errorf("unable to get proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
-	}
-
-	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue(ctx, "additional_bw", tpInstance.UsScheduler.AdditionalBw))
-	if bw == -1 {
-		logger.Errorf(ctx, "Error in getting proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
-		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
-	}
-
-	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue(ctx, "sched_policy", tpInstance.UsScheduler.QSchedPolicy))
-	if policy == -1 {
-		logger.Errorf(ctx, "Error in getting proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
-		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
-	}
-
-	return &tp_pb.SchedulerConfig{
-		Direction:    dir,
-		AdditionalBw: bw,
-		Priority:     tpInstance.UsScheduler.Priority,
-		Weight:       tpInstance.UsScheduler.Weight,
-		SchedPolicy:  policy}, nil
-}
-
-func (t *TechProfileMgr) GetDsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
-
-	dir := tp_pb.Direction(t.GetprotoBufParamValue(ctx, "direction", tpInstance.DsScheduler.Direction))
-	if dir == -1 {
-		logger.Errorf(ctx, "Error in getting proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
-		return nil, fmt.Errorf("unable to get proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
-	}
-
-	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue(ctx, "additional_bw", tpInstance.DsScheduler.AdditionalBw))
-	if bw == -1 {
-		logger.Errorf(ctx, "Error in getting proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
-		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
-	}
-
-	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue(ctx, "sched_policy", tpInstance.DsScheduler.QSchedPolicy))
-	if policy == -1 {
-		logger.Errorf(ctx, "Error in getting proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
-		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
-	}
-
-	return &tp_pb.SchedulerConfig{
-		Direction:    dir,
-		AdditionalBw: bw,
-		Priority:     tpInstance.DsScheduler.Priority,
-		Weight:       tpInstance.DsScheduler.Weight,
-		SchedPolicy:  policy}, nil
-}
-
-func (t *TechProfileMgr) GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
-	ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler {
-
-	tSched := &tp_pb.TrafficScheduler{
-		Direction:          SchedCfg.Direction,
-		AllocId:            tpInstance.UsScheduler.AllocID,
-		TrafficShapingInfo: ShapingCfg,
-		Scheduler:          SchedCfg}
-
-	return tSched
-}
-
-func (tpm *TechProfileMgr) GetTrafficQueues(ctx context.Context, tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
-
-	var encryp bool
-	if Dir == tp_pb.Direction_UPSTREAM {
-		// upstream GEM ports
-		NumGemPorts := len(tp.UpstreamGemPortAttributeList)
-		GemPorts := make([]*tp_pb.TrafficQueue, 0)
-		for Count := 0; Count < NumGemPorts; Count++ {
-			if tp.UpstreamGemPortAttributeList[Count].AesEncryption == "True" {
-				encryp = true
-			} else {
-				encryp = false
-			}
-
-			schedPolicy := tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
-			if schedPolicy == -1 {
-				logger.Errorf(ctx, "Error in getting Proto Id for scheduling policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
-				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
-			}
-
-			discardPolicy := tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
-			if discardPolicy == -1 {
-				logger.Errorf(ctx, "Error in getting Proto Id for discard policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy, Count)
-				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
-			}
-
-			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
-				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.UsScheduler.Direction)),
-				GemportId:     tp.UpstreamGemPortAttributeList[Count].GemportID,
-				PbitMap:       tp.UpstreamGemPortAttributeList[Count].PbitMap,
-				AesEncryption: encryp,
-				SchedPolicy:   tp_pb.SchedulingPolicy(schedPolicy),
-				Priority:      tp.UpstreamGemPortAttributeList[Count].PriorityQueue,
-				Weight:        tp.UpstreamGemPortAttributeList[Count].Weight,
-				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
-			})
-		}
-		logger.Debugw(ctx, "Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
-		return GemPorts, nil
-	} else if Dir == tp_pb.Direction_DOWNSTREAM {
-		//downstream GEM ports
-		NumGemPorts := len(tp.DownstreamGemPortAttributeList)
-		GemPorts := make([]*tp_pb.TrafficQueue, 0)
-		for Count := 0; Count < NumGemPorts; Count++ {
-			if isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
-				//do not take multicast GEM ports. They are handled separately.
-				continue
-			}
-			if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
-				encryp = true
-			} else {
-				encryp = false
-			}
-
-			schedPolicy := tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
-			if schedPolicy == -1 {
-				logger.Errorf(ctx, "Error in getting Proto Id for scheduling policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
-				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
-			}
-
-			discardPolicy := tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
-			if discardPolicy == -1 {
-				logger.Errorf(ctx, "Error in getting Proto Id for discard policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy, Count)
-				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
-			}
-
-			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
-				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.DsScheduler.Direction)),
-				GemportId:     tp.DownstreamGemPortAttributeList[Count].GemportID,
-				PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
-				AesEncryption: encryp,
-				SchedPolicy:   tp_pb.SchedulingPolicy(schedPolicy),
-				Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
-				Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
-				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
-			})
-		}
-		logger.Debugw(ctx, "Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
-		return GemPorts, nil
-	}
-
-	logger.Errorf(ctx, "Unsupported direction %s used for generating Traffic Queue list", Dir)
-	return nil, fmt.Errorf("traffic queue creation failed due to unsupported direction %s", Dir)
-}
-
-// isMulticastGem returns true if the isMulticast attribute value of a GEM port is true; false otherwise
-func isMulticastGem(isMulticastAttrValue string) bool {
-	return isMulticastAttrValue != "" &&
-		(isMulticastAttrValue == "True" || isMulticastAttrValue == "true" || isMulticastAttrValue == "TRUE")
-}
-
-func (tpm *TechProfileMgr) GetMulticastTrafficQueues(ctx context.Context, tp *TechProfile) []*tp_pb.TrafficQueue {
-	var encryp bool
-	NumGemPorts := len(tp.DownstreamGemPortAttributeList)
-	mcastTrafficQueues := make([]*tp_pb.TrafficQueue, 0)
-	for Count := 0; Count < NumGemPorts; Count++ {
-		if !isMulticastGem(tp.DownstreamGemPortAttributeList[Count].IsMulticast) {
-			continue
-		}
-		if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
-			encryp = true
-		} else {
-			encryp = false
-		}
-		mcastTrafficQueues = append(mcastTrafficQueues, &tp_pb.TrafficQueue{
-			Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.DsScheduler.Direction)),
-			GemportId:     tp.DownstreamGemPortAttributeList[Count].McastGemID,
-			PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
-			AesEncryption: encryp,
-			SchedPolicy:   tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
-			Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
-			Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
-			DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
-		})
-	}
-	logger.Debugw(ctx, "Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
-	return mcastTrafficQueues
-}
-
-func (tpm *TechProfileMgr) GetUsTrafficScheduler(ctx context.Context, tp *TechProfile) *tp_pb.TrafficScheduler {
-	UsScheduler, _ := tpm.GetUsScheduler(ctx, tp)
-
-	return &tp_pb.TrafficScheduler{Direction: UsScheduler.Direction,
-		AllocId:   tp.UsScheduler.AllocID,
-		Scheduler: UsScheduler}
-}
-
-func (t *TechProfileMgr) GetGemportForPbit(ctx context.Context, tp interface{}, dir tp_pb.Direction, pbit uint32) interface{} {
-	/*
-	  Function to get the Gemport mapped to a pbit.
-	*/
-	switch tp := tp.(type) {
-	case *TechProfile:
-		if dir == tp_pb.Direction_UPSTREAM {
-			// upstream GEM ports
-			numGemPorts := len(tp.UpstreamGemPortAttributeList)
-			for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
-				lenOfPbitMap := len(tp.UpstreamGemPortAttributeList[gemCnt].PbitMap)
-				for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
-					// Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
-					// "lenOfPbitMap - pbitMapIdx + 1" gives the i-th p-bit value, counting from the LSB of the pbit map string
-					if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
-						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-							logger.Debugw(ctx, "Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[gemCnt].GemportID})
-							return tp.UpstreamGemPortAttributeList[gemCnt]
-						}
-					}
-				}
-			}
-		} else if dir == tp_pb.Direction_DOWNSTREAM {
-			//downstream GEM ports
-			numGemPorts := len(tp.DownstreamGemPortAttributeList)
-			for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
-				lenOfPbitMap := len(tp.DownstreamGemPortAttributeList[gemCnt].PbitMap)
-				for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
-					// Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
-					// "lenOfPbitMap - pbitMapIdx + 1" gives the i-th p-bit value, counting from the LSB of the pbit map string
-					if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
-						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-							logger.Debugw(ctx, "Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[gemCnt].GemportID})
-							return tp.DownstreamGemPortAttributeList[gemCnt]
-						}
-					}
-				}
-			}
-		}
-		logger.Errorw(ctx, "No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
-	case *EponProfile:
-		if dir == tp_pb.Direction_UPSTREAM {
-			// upstream GEM ports
-			numGemPorts := len(tp.UpstreamQueueAttributeList)
-			for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
-				lenOfPbitMap := len(tp.UpstreamQueueAttributeList[gemCnt].PbitMap)
-				for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
-					// Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
-					// "lenOfPbitMap - pbitMapIdx + 1" gives the i-th p-bit value, counting from the LSB of the pbit map string
-					if p, err := strconv.Atoi(string(tp.UpstreamQueueAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
-						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-							logger.Debugw(ctx, "Found-US-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.UpstreamQueueAttributeList[gemCnt].GemportID})
-							return tp.UpstreamQueueAttributeList[gemCnt]
-						}
-					}
-				}
-			}
-		} else if dir == tp_pb.Direction_DOWNSTREAM {
-			//downstream GEM ports
-			numGemPorts := len(tp.DownstreamQueueAttributeList)
-			for gemCnt := 0; gemCnt < numGemPorts; gemCnt++ {
-				lenOfPbitMap := len(tp.DownstreamQueueAttributeList[gemCnt].PbitMap)
-				for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
-					// Given a sample pbit map string "0b00000001", lenOfPbitMap is 10
-					// "lenOfPbitMap - pbitMapIdx + 1" gives the i-th p-bit value, counting from the LSB of the pbit map string
-					if p, err := strconv.Atoi(string(tp.DownstreamQueueAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
-						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-							logger.Debugw(ctx, "Found-DS-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.DownstreamQueueAttributeList[gemCnt].GemportID})
-							return tp.DownstreamQueueAttributeList[gemCnt]
-						}
-					}
-				}
-			}
-		}
-		logger.Errorw(ctx, "No-QueueId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
-	default:
-		logger.Errorw(ctx, "unknown-tech", log.Fields{"tp": tp})
-	}
-	return nil
-}
-
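// Editor's note: illustrative sketch, not part of the original vendored file.
// It isolates the pbit-map decoding used by GetGemportForPbit above: for the
// map string "0b00001000", only p-bit 3 (counted from the LSB) is marked as served.
func examplePbitMapHasPbit(pbitMap string, pbit uint32) bool {
	lenOfPbitMap := len(pbitMap) // 10 for "0b00001000"
	for pbitMapIdx := 2; pbitMapIdx < lenOfPbitMap; pbitMapIdx++ {
		if p, err := strconv.Atoi(string(pbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
			if uint32(pbitMapIdx-2) == pbit && p == 1 {
				return true
			}
		}
	}
	return false
}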
-// FindAllTpInstances returns all TechProfile instances for a given TechProfile table-id, pon interface ID and onu ID.
-func (t *TechProfileMgr) FindAllTpInstances(ctx context.Context, techProfiletblID uint32, ponIntf uint32, onuID uint32) interface{} {
-	var tpTech TechProfile
-	var tpEpon EponProfile
-
-	onuTpInstancePath := fmt.Sprintf("%s/%d/pon-{%d}/onu-{%d}", t.resourceMgr.GetTechnology(), techProfiletblID, ponIntf, onuID)
-
-	if kvPairs, _ := t.config.KVBackend.List(ctx, onuTpInstancePath); kvPairs != nil {
-		tech := t.resourceMgr.GetTechnology()
-		tpInstancesTech := make([]TechProfile, 0, len(kvPairs))
-		tpInstancesEpon := make([]EponProfile, 0, len(kvPairs))
-
-		for kvPath, kvPair := range kvPairs {
-			if value, err := kvstore.ToByte(kvPair.Value); err == nil {
-				if tech == xgspon || tech == gpon {
-					if err = json.Unmarshal(value, &tpTech); err != nil {
-						logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
-						continue
-					} else {
-						tpInstancesTech = append(tpInstancesTech, tpTech)
-					}
-				} else if tech == epon {
-					if err = json.Unmarshal(value, &tpEpon); err != nil {
-						logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
-						continue
-					} else {
-						tpInstancesEpon = append(tpInstancesEpon, tpEpon)
-					}
-				}
-			}
-		}
-
-		switch tech {
-		case xgspon, gpon:
-			return tpInstancesTech
-		case epon:
-			return tpInstancesEpon
-		default:
-			logger.Errorw(ctx, "unknown-technology", log.Fields{"tech": tech})
-			return nil
-		}
-	}
-	return nil
-}
-
-func (t *TechProfileMgr) GetResourceID(ctx context.Context, IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error) {
-	logger.Debugw(ctx, "getting-resource-id", log.Fields{
-		"intf-id":       IntfID,
-		"resource-type": ResourceType,
-		"num":           NumIDs,
-	})
-	var err error
-	var ids []uint32
-	switch ResourceType {
-	case t.resourceMgr.GetResourceTypeAllocID():
-		t.AllocIDMgmtLock.Lock()
-		ids, err = t.resourceMgr.GetResourceID(ctx, IntfID, ResourceType, NumIDs)
-		t.AllocIDMgmtLock.Unlock()
-	case t.resourceMgr.GetResourceTypeGemPortID():
-		t.GemPortIDMgmtLock.Lock()
-		ids, err = t.resourceMgr.GetResourceID(ctx, IntfID, ResourceType, NumIDs)
-		t.GemPortIDMgmtLock.Unlock()
-	case t.resourceMgr.GetResourceTypeOnuID():
-		t.OnuIDMgmtLock.Lock()
-		ids, err = t.resourceMgr.GetResourceID(ctx, IntfID, ResourceType, NumIDs)
-		t.OnuIDMgmtLock.Unlock()
-	default:
-		return nil, fmt.Errorf("ResourceType %s not supported", ResourceType)
-	}
-	if err != nil {
-		return nil, err
-	}
-	return ids, nil
-}
-
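// Editor's note: illustrative sketch, not part of the original vendored file.
// GetResourceID (and the matching FreeResourceID below) serialize access per
// resource type so concurrent TP creations do not race on the resource manager.
func exampleAllocateGemPorts(ctx context.Context, t *TechProfileMgr, intfID uint32) {
	gems, err := t.GetResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeGemPortID(), 4)
	if err != nil {
		return
	}
	logger.Debugw(ctx, "allocated-gemports", log.Fields{"intfID": intfID, "gemports": gems})
	// release them again when no longer needed
	_ = t.FreeResourceID(ctx, intfID, t.resourceMgr.GetResourceTypeGemPortID(), gems)
}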
-func (t *TechProfileMgr) FreeResourceID(ctx context.Context, IntfID uint32, ResourceType string, ReleaseContent []uint32) error {
-	logger.Debugw(ctx, "freeing-resource-id", log.Fields{
-		"intf-id":         IntfID,
-		"resource-type":   ResourceType,
-		"release-content": ReleaseContent,
-	})
-	var err error
-	switch ResourceType {
-	case t.resourceMgr.GetResourceTypeAllocID():
-		t.AllocIDMgmtLock.Lock()
-		err = t.resourceMgr.FreeResourceID(ctx, IntfID, ResourceType, ReleaseContent)
-		t.AllocIDMgmtLock.Unlock()
-	case t.resourceMgr.GetResourceTypeGemPortID():
-		t.GemPortIDMgmtLock.Lock()
-		err = t.resourceMgr.FreeResourceID(ctx, IntfID, ResourceType, ReleaseContent)
-		t.GemPortIDMgmtLock.Unlock()
-	case t.resourceMgr.GetResourceTypeOnuID():
-		t.OnuIDMgmtLock.Lock()
-		err = t.resourceMgr.FreeResourceID(ctx, IntfID, ResourceType, ReleaseContent)
-		t.OnuIDMgmtLock.Unlock()
-	default:
-		return fmt.Errorf("ResourceType %s not supported", ResourceType)
-	}
-	if err != nil {
-		return err
-	}
-	return nil
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile_if.go
deleted file mode 100644
index 84d84ee..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile_if.go
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright 2019-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package techprofile
-
-import (
-	"context"
-
-	"github.com/opencord/voltha-lib-go/v4/pkg/db"
-	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
-)
-
-type TechProfileIf interface {
-	SetKVClient(ctx context.Context, pathPrefix string) *db.Backend
-	GetTechProfileInstanceKVPath(ctx context.Context, techProfiletblID uint32, uniPortName string) string
-	GetTPInstanceFromKVStore(ctx context.Context, techProfiletblID uint32, path string) (interface{}, error)
-	CreateTechProfInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfId uint32) (interface{}, error)
-	DeleteTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string) error
-	GetprotoBufParamValue(ctx context.Context, paramType string, paramKey string) int32
-	GetUsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
-	GetDsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
-	GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
-		ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
-	GetTrafficQueues(ctx context.Context, tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
-	GetMulticastTrafficQueues(ctx context.Context, tp *TechProfile) []*tp_pb.TrafficQueue
-	GetGemportForPbit(ctx context.Context, tp interface{}, Dir tp_pb.Direction, pbit uint32) interface{}
-	FindAllTpInstances(ctx context.Context, techProfiletblID uint32, ponIntf uint32, onuID uint32) interface{}
-	GetResourceID(ctx context.Context, IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
-	FreeResourceID(ctx context.Context, IntfID uint32, ResourceType string, ReleaseContent []uint32) error
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/adapter_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/adapter_proxy_if.go
similarity index 80%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/adapter_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/adapter_proxy_if.go
index 30fcead..c514d6d 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/adapter_proxy_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/adapter_proxy_if.go
@@ -33,4 +33,13 @@
 		toDeviceID string,
 		proxyDeviceID string,
 		messageID string) error
+	TechProfileInstanceRequest(ctx context.Context,
+		tpPath string,
+		ponIntfID uint32,
+		onuID uint32,
+		uniID uint32,
+		fromAdapter string,
+		toAdapter string,
+		toDeviceID string,
+		proxyDeviceID string) (*ic.InterAdapterTechProfileDownloadMessage, error)
 }
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/core_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/core_proxy_if.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/core_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif/core_proxy_if.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/adapter_proxy.go
new file mode 100644
index 0000000..fc31041
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/adapter_proxy.go
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"context"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"google.golang.org/grpc/status"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+)
+
+type AdapterProxy struct {
+	kafkaICProxy kafka.InterContainerProxy
+	coreTopic    string
+	endpointMgr  kafka.EndpointManager
+}
+
+func NewAdapterProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, coreTopic string, backend *db.Backend) *AdapterProxy {
+	proxy := AdapterProxy{
+		kafkaICProxy: kafkaProxy,
+		coreTopic:    coreTopic,
+		endpointMgr:  kafka.NewEndpointManager(backend),
+	}
+	logger.Debugw(ctx, "topics", log.Fields{"core": proxy.coreTopic})
+	return &proxy
+}
+
+func (ap *AdapterProxy) SendInterAdapterMessage(ctx context.Context,
+	msg proto.Message,
+	msgType ic.InterAdapterMessageType_Types,
+	fromAdapter string,
+	toAdapter string,
+	toDeviceId string,
+	proxyDeviceId string,
+	messageId string) error {
+	logger.Debugw(ctx, "sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
+		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
+
+	//Marshal the message
+	var marshalledMsg *any.Any
+	var err error
+	if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-msg", log.Fields{"error": err})
+		return err
+	}
+
+	// Set up the required rpc arguments
+	endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
+	if err != nil {
+		return err
+	}
+
+	//Build the inter adapter message
+	header := &ic.InterAdapterHeader{
+		Type:          msgType,
+		FromTopic:     fromAdapter,
+		ToTopic:       string(endpoint),
+		ToDeviceId:    toDeviceId,
+		ProxyDeviceId: proxyDeviceId,
+	}
+	if messageId != "" {
+		header.Id = messageId
+	} else {
+		header.Id = uuid.New().String()
+	}
+	header.Timestamp = ptypes.TimestampNow()
+	iaMsg := &ic.InterAdapterMessage{
+		Header: header,
+		Body:   marshalledMsg,
+	}
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "msg",
+		Value: iaMsg,
+	}
+
+	topic := kafka.Topic{Name: string(endpoint)}
+	replyToTopic := kafka.Topic{Name: fromAdapter}
+	rpc := "process_inter_adapter_message"
+
+	// Add an indication in the context to differentiate this inter-adapter message during span processing in the Kafka IC proxy
+	ctx = context.WithValue(ctx, "inter-adapter-msg-type", msgType)
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
+	logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(ctx, rpc, "", success, result)
+}
+
+func (ap *AdapterProxy) TechProfileInstanceRequest(ctx context.Context,
+	tpPath string,
+	parentPonPort uint32,
+	onuID uint32,
+	uniID uint32,
+	fromAdapter string,
+	toAdapter string,
+	toDeviceId string,
+	proxyDeviceId string) (*ic.InterAdapterTechProfileDownloadMessage, error) {
+	logger.Debugw(ctx, "sending-tech-profile-instance-request-message", log.Fields{"from": fromAdapter,
+		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
+
+	// Set up the required rpc arguments
+	endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
+	if err != nil {
+		return nil, err
+	}
+
+	//Build the inter adapter message
+	tpReqMsg := &ic.InterAdapterTechProfileInstanceRequestMessage{
+		TpInstancePath: tpPath,
+		ParentDeviceId: toDeviceId,
+		ParentPonPort:  parentPonPort,
+		OnuId:          onuID,
+		UniId:          uniID,
+	}
+
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "msg",
+		Value: tpReqMsg,
+	}
+
+	topic := kafka.Topic{Name: string(endpoint)}
+	replyToTopic := kafka.Topic{Name: fromAdapter}
+	rpc := "process_tech_profile_instance_request"
+
+	ctx = context.WithValue(ctx, "inter-adapter-tp-req-msg", tpPath)
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
+	logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	if success {
+		tpDwnldMsg := &ic.InterAdapterTechProfileDownloadMessage{}
+		if err := ptypes.UnmarshalAny(result, tpDwnldMsg); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, err
+		}
+		return tpDwnldMsg, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "TechProfileInstanceRequest-return", log.Fields{"tpPath": tpPath, "success": success, "error": err})
+
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
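
For context, a minimal sketch of how an adapter-side caller might use the new TechProfileInstanceRequest helper added above; it issues an InterAdapterTechProfileInstanceRequestMessage over the inter-container proxy and returns the resulting InterAdapterTechProfileDownloadMessage. The adapter names and device identifiers below are placeholders, not values taken from this change:

    // Sketch only; assumes the usual adapterif and inter_container (ic) imports.
    func fetchTPInstance(ctx context.Context, ap adapterif.AdapterProxy, tpPath string,
        ponIntf, onuID, uniID uint32) (*ic.InterAdapterTechProfileDownloadMessage, error) {
        // The peer adapter addressed by toAdapter/toDeviceID is expected to answer the request.
        return ap.TechProfileInstanceRequest(ctx, tpPath, ponIntf, onuID, uniID,
            "brcm_openomci_onu", // fromAdapter: this adapter's topic (placeholder)
            "openolt",           // toAdapter: the parent OLT adapter (placeholder)
            "parent-device-id",  // toDeviceID (placeholder)
            "proxy-device-id")   // proxyDeviceID (placeholder)
    }
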
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/common.go
index 5d7d7f8..98085bb 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/common.go
@@ -16,7 +16,7 @@
 package common
 
 import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/core_proxy.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/core_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/core_proxy.go
index 1077226..589d951 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/core_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/core_proxy.go
@@ -21,8 +21,8 @@
 
 	"github.com/golang/protobuf/ptypes"
 	a "github.com/golang/protobuf/ptypes/any"
-	"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	"github.com/opencord/voltha-protos/v4/go/voltha"
 	"google.golang.org/grpc/codes"
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/performance_metrics.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/performance_metrics.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/performance_metrics.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/request_handler.go
similarity index 95%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/request_handler.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/request_handler.go
index b6cf1c0..90f575b 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/request_handler.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/request_handler.go
@@ -21,10 +21,10 @@
 
 	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/empty"
-	"github.com/opencord/voltha-lib-go/v4/pkg/adapters"
-	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
-	"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/adapters"
+	"github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"github.com/opencord/voltha-protos/v4/go/extension"
 	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	"github.com/opencord/voltha-protos/v4/go/openflow_13"
@@ -559,6 +559,37 @@
 	return new(empty.Empty), nil
 }
 
+func (rhp *RequestHandlerProxy) Process_tech_profile_instance_request(ctx context.Context, args []*ic.Argument) (*ic.InterAdapterTechProfileDownloadMessage, error) {
+	if len(args) < 2 {
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		err := errors.New("invalid-number-of-args")
+		return nil, err
+	}
+	iaTpReqMsg := &ic.InterAdapterTechProfileInstanceRequestMessage{}
+	transactionID := &ic.StrType{}
+	for _, arg := range args {
+		switch arg.Key {
+		case "msg":
+			if err := ptypes.UnmarshalAny(arg.Value, iaTpReqMsg); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				return nil, err
+			}
+		case kafka.TransactionKey:
+			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				return nil, err
+			}
+		}
+	}
+
+	logger.Debugw(ctx, "Process_tech_profile_instance_request", log.Fields{"tpPath": iaTpReqMsg.TpInstancePath})
+
+	//Invoke the tech profile instance request
+	tpInst := rhp.adapter.Process_tech_profile_instance_request(ctx, iaTpReqMsg)
+
+	return tpInst, nil
+}
+
 func (rhp *RequestHandlerProxy) Download_image(ctx context.Context, args []*ic.Argument) (*voltha.ImageDownload, error) {
 	device, image, err := unMarshalImageDowload(args, ctx)
 	if err != nil {
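
The handler above unpacks the request and delegates to the adapter's Process_tech_profile_instance_request. A hedged sketch of one way a parent adapter could implement that method, answering from an in-memory cache; the receiver type, cache fields and buildDownloadMessage helper are hypothetical and not part of this change:

    // Sketch only: a hypothetical OLT-side implementation serving the TP instance from a local cache.
    func (a *oltAdapter) Process_tech_profile_instance_request(ctx context.Context,
        msg *ic.InterAdapterTechProfileInstanceRequestMessage) *ic.InterAdapterTechProfileDownloadMessage {
        a.tpCacheLock.RLock()
        tpInst, ok := a.tpCache[msg.TpInstancePath]
        a.tpCacheLock.RUnlock()
        if !ok {
            logger.Warnw(ctx, "tp-instance-not-in-cache", log.Fields{"tp-path": msg.TpInstancePath})
            return nil
        }
        // Wrap the cached instance into the download message expected by the requesting adapter.
        return buildDownloadMessage(msg, tpInst)
    }
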
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/utils.go
similarity index 97%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/utils.go
index 65b432c..35f227e 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/common/utils.go
@@ -18,7 +18,7 @@
 import (
 	"context"
 	"fmt"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	"google.golang.org/grpc/codes"
 	"math/rand"
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/iAdapter.go
similarity index 96%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/iAdapter.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/iAdapter.go
index fbf2b5d..aca4271 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/iAdapter.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/adapters/iAdapter.go
@@ -46,6 +46,7 @@
 	Unsuppress_event(ctx context.Context, filter *voltha.EventFilter) error
 	Get_ofp_device_info(ctx context.Context, device *voltha.Device) (*ic.SwitchCapability, error)
 	Process_inter_adapter_message(ctx context.Context, msg *ic.InterAdapterMessage) error
+	Process_tech_profile_instance_request(ctx context.Context, msg *ic.InterAdapterTechProfileInstanceRequestMessage) *ic.InterAdapterTechProfileDownloadMessage
 	Download_image(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
 	Get_image_download_status(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
 	Cancel_image_download(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/common.go
index 294a4bd..606d18c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/common.go
@@ -16,7 +16,7 @@
 package config
 
 import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/configmanager.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/configmanager.go
index 8350225..f5efa36 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/configmanager.go
@@ -22,9 +22,9 @@
 	"strings"
 	"time"
 
-	"github.com/opencord/voltha-lib-go/v4/pkg/db"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 const (
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logcontroller.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logcontroller.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logcontroller.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logcontroller.go
index 8187edc..68bfb32 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logcontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logcontroller.go
@@ -26,7 +26,7 @@
 	"crypto/md5"
 	"encoding/json"
 	"errors"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"os"
 	"sort"
 	"strings"
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logfeaturescontroller.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logfeaturescontroller.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logfeaturescontroller.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logfeaturescontroller.go
index 353ae5c..95c5bde 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logfeaturescontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/config/logfeaturescontroller.go
@@ -19,7 +19,7 @@
 import (
 	"context"
 	"errors"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"os"
 	"strings"
 )
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/backend.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/backend.go
index bf30a48..ff0b5b7 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/backend.go
@@ -23,8 +23,8 @@
 	"sync"
 	"time"
 
-	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/common.go
index 25cddf5..4bc92b1 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/common.go
@@ -16,7 +16,7 @@
 package db
 
 import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/client.go
similarity index 95%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/client.go
index b35f1f3..e4b1fff 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/client.go
@@ -79,14 +79,17 @@
 	Put(ctx context.Context, key string, value interface{}) error
 	Delete(ctx context.Context, key string) error
 	DeleteWithPrefix(ctx context.Context, prefixKey string) error
+	Watch(ctx context.Context, key string, withPrefix bool) chan *Event
+	IsConnectionUp(ctx context.Context) bool // timeout in seconds
+	CloseWatch(ctx context.Context, key string, ch chan *Event)
+	Close(ctx context.Context)
+
+	// These APIs are not used.  They will be cleaned up in the VOLTHA 2.9 release.
+	// They are kept for now to limit the changes across components.
 	Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error)
 	ReleaseReservation(ctx context.Context, key string) error
 	ReleaseAllReservations(ctx context.Context) error
 	RenewReservation(ctx context.Context, key string) error
-	Watch(ctx context.Context, key string, withPrefix bool) chan *Event
 	AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error
 	ReleaseLock(lockName string) error
-	IsConnectionUp(ctx context.Context) bool // timeout in second
-	CloseWatch(ctx context.Context, key string, ch chan *Event)
-	Close(ctx context.Context)
 }
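
A short sketch of the watch lifecycle kept at the top of the reorganized interface, assuming kv implements the kvstore Client interface (for example the pooled etcd client added below); the key name is a placeholder:

    // Sketch only: listen for changes on a key and release the watcher when done.
    func watchKey(ctx context.Context, kv kvstore.Client, key string) {
        ch := kv.Watch(ctx, key, false)
        if ch == nil {
            return
        }
        defer kv.CloseWatch(ctx, key, ch)
        for event := range ch {
            // React to PUT/DELETE events delivered for the key.
            _ = event
        }
    }
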
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/common.go
index 99c603d..b8509db 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/common.go
@@ -16,7 +16,7 @@
 package kvstore
 
 import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdclient.go
new file mode 100644
index 0000000..96ffc2f
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdclient.go
@@ -0,0 +1,473 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	v3Client "go.etcd.io/etcd/clientv3"
+	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	"os"
+	"strconv"
+	"sync"
+	"time"
+)
+
+const (
+	poolCapacityEnvName = "VOLTHA_ETCD_CLIENT_POOL_CAPACITY"
+	maxUsageEnvName     = "VOLTHA_ETCD_CLIENT_MAX_USAGE"
+)
+
+const (
+	defaultMaxPoolCapacity = 1000 // Default size of an Etcd Client pool
+	defaultMaxPoolUsage    = 100  // Maximum concurrent requests an Etcd Client is allowed to process
+)
+
+// EtcdClient represents the Etcd KV store client
+type EtcdClient struct {
+	pool               EtcdClientAllocator
+	watchedChannels    sync.Map
+	watchedClients     map[string]*v3Client.Client
+	watchedClientsLock sync.RWMutex
+}
+
+// NewEtcdCustomClient returns a new client for the Etcd KV store, allowing
+// the caller to specify the etcd client configuration
+func NewEtcdCustomClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+	// Get the capacity and max usage from the environment
+	capacity := defaultMaxPoolCapacity
+	maxUsage := defaultMaxPoolUsage
+	if capacityStr, present := os.LookupEnv(poolCapacityEnvName); present {
+		if val, err := strconv.Atoi(capacityStr); err == nil {
+			capacity = val
+			logger.Infow(ctx, "env-variable-set", log.Fields{"pool-capacity": capacity})
+		} else {
+			logger.Warnw(ctx, "invalid-capacity-value", log.Fields{"error": err, "capacity": capacityStr})
+		}
+	}
+	if maxUsageStr, present := os.LookupEnv(maxUsageEnvName); present {
+		if val, err := strconv.Atoi(maxUsageStr); err == nil {
+			maxUsage = val
+			logger.Infow(ctx, "env-variable-set", log.Fields{"max-usage": maxUsage})
+		} else {
+			logger.Warnw(ctx, "invalid-max-usage-value", log.Fields{"error": err, "max-usage": maxUsageStr})
+		}
+	}
+
+	var err error
+
+	pool, err := NewRoundRobinEtcdClientAllocator([]string{addr}, timeout, capacity, maxUsage, level)
+	if err != nil {
+		logger.Errorw(ctx, "failed-to-create-rr-client", log.Fields{
+			"error": err,
+		})
+	}
+
+	logger.Infow(ctx, "etcd-pool-created", log.Fields{"capacity": capacity, "max-usage": maxUsage})
+
+	return &EtcdClient{pool: pool,
+		watchedClients: make(map[string]*v3Client.Client),
+	}, nil
+}
+
+// NewEtcdClient returns a new client for the Etcd KV store
+func NewEtcdClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+	return NewEtcdCustomClient(ctx, addr, timeout, level)
+}
+
+// IsConnectionUp returns whether the connection to the Etcd KV store is up.  If a timeout occurs then
+// it is assumed the connection is down or unreachable.
+func (c *EtcdClient) IsConnectionUp(ctx context.Context) bool {
+	// Let's try to get a non-existent key.  If the connection is up then there will be no error returned.
+	if _, err := c.Get(ctx, "non-existent-key"); err != nil {
+		return false
+	}
+	return true
+}
+
+// List returns a map of key-value pairs for which the given key is a prefix.  The context timeout defines how long
+// the function will wait for a response
+func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer c.pool.Put(client)
+	resp, err := client.Get(ctx, key, v3Client.WithPrefix())
+
+	if err != nil {
+		logger.Error(ctx, err)
+		return nil, err
+	}
+	m := make(map[string]*KVPair)
+	for _, ev := range resp.Kvs {
+		m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version)
+	}
+	return m, nil
+}
+
+// Get returns a key-value pair for a given key. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Get(ctx context.Context, key string) (*KVPair, error) {
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer c.pool.Put(client)
+
+	attempt := 0
+
+startLoop:
+	for {
+		resp, err := client.Get(ctx, key)
+		if err != nil {
+			switch err {
+			case context.Canceled:
+				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			case context.DeadlineExceeded:
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+			case v3rpcTypes.ErrEmptyKey:
+				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			case v3rpcTypes.ErrLeaderChanged,
+				v3rpcTypes.ErrGRPCNoLeader,
+				v3rpcTypes.ErrTimeout,
+				v3rpcTypes.ErrTimeoutDueToLeaderFail,
+				v3rpcTypes.ErrTimeoutDueToConnectionLost:
+				// Retry for these server errors
+				attempt += 1
+				if er := backoff(ctx, attempt); er != nil {
+					logger.Warnw(ctx, "get-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+					return nil, err
+				}
+				logger.Warnw(ctx, "retrying-get", log.Fields{"key": key, "error": err, "attempt": attempt})
+				goto startLoop
+			default:
+				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			}
+			return nil, err
+		}
+
+		for _, ev := range resp.Kvs {
+			// Only one value is returned
+			return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version), nil
+		}
+		return nil, nil
+	}
+}
+
+// Put writes a key-value pair to the KV store.  Value can only be a string or []byte since the etcd API
+// accepts only a string as a value for a put operation. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Put(ctx context.Context, key string, value interface{}) error {
+
+	// Validate that we can convert value to a string as etcd API expects a string
+	var val string
+	var err error
+	if val, err = ToString(value); err != nil {
+		return fmt.Errorf("unexpected-type-%T", value)
+	}
+
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return err
+	}
+	defer c.pool.Put(client)
+
+	attempt := 0
+startLoop:
+	for {
+		_, err = client.Put(ctx, key, val)
+		if err != nil {
+			switch err {
+			case context.Canceled:
+				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			case context.DeadlineExceeded:
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+			case v3rpcTypes.ErrEmptyKey:
+				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			case v3rpcTypes.ErrLeaderChanged,
+				v3rpcTypes.ErrGRPCNoLeader,
+				v3rpcTypes.ErrTimeout,
+				v3rpcTypes.ErrTimeoutDueToLeaderFail,
+				v3rpcTypes.ErrTimeoutDueToConnectionLost:
+				// Retry for these server errors
+				attempt += 1
+				if er := backoff(ctx, attempt); er != nil {
+					logger.Warnw(ctx, "put-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+					return err
+				}
+				logger.Warnw(ctx, "retrying-put", log.Fields{"key": key, "error": err, "attempt": attempt})
+				goto startLoop
+			default:
+				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			}
+			return err
+		}
+		return nil
+	}
+}
+
+// Delete removes a key from the KV store. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Delete(ctx context.Context, key string) error {
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return err
+	}
+	defer c.pool.Put(client)
+
+	attempt := 0
+startLoop:
+	for {
+		_, err = client.Delete(ctx, key)
+		if err != nil {
+			switch err {
+			case context.Canceled:
+				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			case context.DeadlineExceeded:
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+			case v3rpcTypes.ErrEmptyKey:
+				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			case v3rpcTypes.ErrLeaderChanged,
+				v3rpcTypes.ErrGRPCNoLeader,
+				v3rpcTypes.ErrTimeout,
+				v3rpcTypes.ErrTimeoutDueToLeaderFail,
+				v3rpcTypes.ErrTimeoutDueToConnectionLost:
+				// Retry for these server errors
+				attempt += 1
+				if er := backoff(ctx, attempt); er != nil {
+					logger.Warnw(ctx, "delete-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+					return err
+				}
+				logger.Warnw(ctx, "retrying-delete", log.Fields{"key": key, "error": err, "attempt": attempt})
+				goto startLoop
+			default:
+				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			}
+			return err
+		}
+		logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
+		return nil
+	}
+}
+
+func (c *EtcdClient) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
+
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return err
+	}
+	defer c.pool.Put(client)
+
+	//delete the prefix
+	if _, err := client.Delete(ctx, prefixKey, v3Client.WithPrefix()); err != nil {
+		logger.Errorw(ctx, "failed-to-delete-prefix-key", log.Fields{"key": prefixKey, "error": err})
+		return err
+	}
+	logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": prefixKey})
+	return nil
+}
+
+// Watch provides the watch capability on a given key.  It returns a channel on which the caller needs to
+// listen to receive Events.
+func (c *EtcdClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
+	var err error
+	// Reuse the Etcd client when multiple callers are watching the same key.
+	c.watchedClientsLock.Lock()
+	client, exist := c.watchedClients[key]
+	if !exist {
+		client, err = c.pool.Get(ctx)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-an-etcd-client", log.Fields{"key": key, "error": err})
+			c.watchedClientsLock.Unlock()
+			return nil
+		}
+		c.watchedClients[key] = client
+	}
+	c.watchedClientsLock.Unlock()
+
+	w := v3Client.NewWatcher(client)
+	ctx, cancel := context.WithCancel(ctx)
+	var channel v3Client.WatchChan
+	if withPrefix {
+		channel = w.Watch(ctx, key, v3Client.WithPrefix())
+	} else {
+		channel = w.Watch(ctx, key)
+	}
+
+	// Create a new channel
+	ch := make(chan *Event, maxClientChannelBufferSize)
+
+	// Keep track of the created channels so they can be closed when required
+	channelMap := make(map[chan *Event]v3Client.Watcher)
+	channelMap[ch] = w
+	channelMaps := c.addChannelMap(key, channelMap)
+
+	// Log only the number of channels (rather than channelMaps itself) as the underlying logger cannot format the
+	// map of channels as JSON.
+	logger.Debugw(ctx, "watched-channels", log.Fields{"len": len(channelMaps)})
+	// Launch a go routine to listen for updates
+	go c.listenForKeyChange(ctx, channel, ch, cancel)
+
+	return ch
+
+}
+
+func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]v3Client.Watcher) []map[chan *Event]v3Client.Watcher {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]v3Client.Watcher), channelMap)
+	} else {
+		channels = []map[chan *Event]v3Client.Watcher{channelMap}
+	}
+	c.watchedChannels.Store(key, channels)
+
+	return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]v3Client.Watcher {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]v3Client.Watcher)[:pos], channels.([]map[chan *Event]v3Client.Watcher)[pos+1:]...)
+		c.watchedChannels.Store(key, channels)
+	}
+
+	return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]v3Client.Watcher, bool) {
+	var channels interface{}
+	var exists bool
+
+	channels, exists = c.watchedChannels.Load(key)
+
+	if channels == nil {
+		return nil, exists
+	}
+
+	return channels.([]map[chan *Event]v3Client.Watcher), exists
+}
+
+// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
+// may be multiple listeners on the same key.  The previously created channel serves as a key
+func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
+	// Get the array of channels mapping
+	var watchedChannels []map[chan *Event]v3Client.Watcher
+	var ok bool
+
+	if watchedChannels, ok = c.getChannelMaps(key); !ok {
+		logger.Warnw(ctx, "key-has-no-watched-channels", log.Fields{"key": key})
+		return
+	}
+	// Look for the channels
+	var pos = -1
+	for i, chMap := range watchedChannels {
+		if t, ok := chMap[ch]; ok {
+			logger.Debug(ctx, "channel-found")
+			// Close the etcd watcher before the client channel.  This should close the etcd channel as well
+			if err := t.Close(); err != nil {
+				logger.Errorw(ctx, "watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+			}
+			pos = i
+			break
+		}
+	}
+
+	channelMaps, _ := c.getChannelMaps(key)
+	// Remove that entry if present
+	if pos >= 0 {
+		channelMaps = c.removeChannelMap(key, pos)
+	}
+
+	// If we don't have any keys being watched then return the Etcd client to the pool
+	if len(channelMaps) == 0 {
+		c.watchedClientsLock.Lock()
+		// Sanity
+		if client, ok := c.watchedClients[key]; ok {
+			c.pool.Put(client)
+			delete(c.watchedClients, key)
+		}
+		c.watchedClientsLock.Unlock()
+	}
+	logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+}
+
+func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+	logger.Debug(ctx, "start-listening-on-channel ...")
+	defer cancel()
+	defer close(ch)
+	for resp := range channel {
+		for _, ev := range resp.Events {
+			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
+		}
+	}
+	logger.Debug(ctx, "stop-listening-on-channel ...")
+}
+
+func getEventType(event *v3Client.Event) int {
+	switch event.Type {
+	case v3Client.EventTypePut:
+		return PUT
+	case v3Client.EventTypeDelete:
+		return DELETE
+	}
+	return UNKNOWN
+}
+
+// Close closes all the connections held by the client pool
+func (c *EtcdClient) Close(ctx context.Context) {
+	logger.Debug(ctx, "closing-etcd-pool")
+	c.pool.Close(ctx)
+}
+
+// The APIs below are not used
+var errUnimplemented = errors.New("deprecated")
+
+// Reserve is deprecated
+func (c *EtcdClient) Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error) {
+	return nil, errUnimplemented
+}
+
+// ReleaseAllReservations is deprecated
+func (c *EtcdClient) ReleaseAllReservations(ctx context.Context) error {
+	return errUnimplemented
+}
+
+// ReleaseReservation is deprecated
+func (c *EtcdClient) ReleaseReservation(ctx context.Context, key string) error {
+	return errUnimplemented
+}
+
+// RenewReservation is deprecated
+func (c *EtcdClient) RenewReservation(ctx context.Context, key string) error {
+	return errUnimplemented
+}
+
+// AcquireLock is deprecated
+func (c *EtcdClient) AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error {
+	return errUnimplemented
+}
+
+// ReleaseLock is deprecated
+func (c *EtcdClient) ReleaseLock(lockName string) error {
+	return errUnimplemented
+}
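
A minimal usage sketch of the new pooled client; the endpoint, key name and log level are illustrative placeholders:

    // Sketch only: create the client, write and read back a key, then release the pool.
    func exampleEtcdUsage(ctx context.Context) error {
        kv, err := kvstore.NewEtcdClient(ctx, "etcd:2379", 5*time.Second, log.WarnLevel)
        if err != nil {
            return err
        }
        defer kv.Close(ctx)

        if err := kv.Put(ctx, "service/voltha/example", "value"); err != nil {
            return err
        }
        // Get/Put/Delete retry internally on transient etcd server errors (see the backoff helper below).
        pair, err := kv.Get(ctx, "service/voltha/example")
        if err != nil {
            return err
        }
        _ = pair
        return nil
    }
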
diff --git a/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdpool.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdpool.go
new file mode 100644
index 0000000..6af7d3d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/etcdpool.go
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2021-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"container/list"
+	"context"
+	"errors"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
+	"go.etcd.io/etcd/clientv3"
+	"sync"
+	"time"
+)
+
+// EtcdClientAllocator represents a generic interface to allocate an Etcd Client
+type EtcdClientAllocator interface {
+	Get(context.Context) (*clientv3.Client, error)
+	Put(*clientv3.Client)
+	Close(ctx context.Context)
+}
+
+// NewRoundRobinEtcdClientAllocator creates a new ETCD Client Allocator using a Round Robin scheme
+func NewRoundRobinEtcdClientAllocator(endpoints []string, timeout time.Duration, capacity, maxUsage int, level log.LogLevel) (EtcdClientAllocator, error) {
+	return &roundRobin{
+		all:       make(map[*clientv3.Client]*rrEntry),
+		full:      make(map[*clientv3.Client]*rrEntry),
+		waitList:  list.New(),
+		max:       maxUsage,
+		capacity:  capacity,
+		timeout:   timeout,
+		endpoints: endpoints,
+		logLevel:  level,
+		closingCh: make(chan struct{}, capacity*maxUsage),
+		stopCh:    make(chan struct{}),
+	}, nil
+}
+
+type rrEntry struct {
+	client *clientv3.Client
+	count  int
+	age    time.Time
+}
+
+type roundRobin struct {
+	//block chan struct{}
+	sync.Mutex
+	available []*rrEntry
+	all       map[*clientv3.Client]*rrEntry
+	full      map[*clientv3.Client]*rrEntry
+	waitList  *list.List
+	max       int
+	capacity  int
+	timeout   time.Duration
+	//ageOut    time.Duration
+	endpoints []string
+	size      int
+	logLevel  log.LogLevel
+	closing   bool
+	closingCh chan struct{}
+	stopCh    chan struct{}
+}
+
+// Get returns an Etcd client. If none is available, it will create one,
+// up to the maximum allowed capacity.  If the maximum capacity has been
+// reached then it will wait until a used one is freed.
+func (r *roundRobin) Get(ctx context.Context) (*clientv3.Client, error) {
+	r.Lock()
+
+	if r.closing {
+		r.Unlock()
+		return nil, errors.New("pool-is-closing")
+	}
+
+	// first determine if we need to block, which would mean the
+	// available queue is empty and we are at capacity
+	if len(r.available) == 0 && r.size >= r.capacity {
+
+		// create a channel on which to wait and
+		// add it to the list
+		ch := make(chan struct{})
+		element := r.waitList.PushBack(ch)
+		r.Unlock()
+
+		// block until it is our turn or context
+		// expires or is canceled
+		select {
+		case <-r.stopCh:
+			logger.Info(ctx, "stop-waiting-pool-is-closing")
+			r.waitList.Remove(element)
+			return nil, errors.New("stop-waiting-pool-is-closing")
+		case <-ch:
+			r.waitList.Remove(element)
+		case <-ctx.Done():
+			r.waitList.Remove(element)
+			return nil, ctx.Err()
+		}
+		r.Lock()
+	}
+
+	defer r.Unlock()
+	if len(r.available) > 0 {
+		// pull from the back of the slice as it is cheaper
+		last := len(r.available) - 1
+		entry := r.available[last]
+		entry.count++
+		if entry.count >= r.max {
+			r.available = r.available[:last]
+			r.full[entry.client] = entry
+		}
+		entry.age = time.Now()
+		return entry.client, nil
+	}
+
+	logConfig := log.ConstructZapConfig(log.JSON, r.logLevel, log.Fields{})
+	// increase capacity
+	client, err := clientv3.New(clientv3.Config{
+		Endpoints:   r.endpoints,
+		DialTimeout: r.timeout,
+		LogConfig:   &logConfig,
+	})
+	if err != nil {
+		return nil, err
+	}
+	entry := &rrEntry{
+		client: client,
+		count:  1,
+	}
+	r.all[entry.client] = entry
+
+	if r.max > 1 {
+		r.available = append(r.available, entry)
+	} else {
+		r.full[entry.client] = entry
+	}
+	r.size++
+	return client, nil
+}
+
+// Put returns the Etcd Client back to the pool
+func (r *roundRobin) Put(client *clientv3.Client) {
+	r.Lock()
+
+	entry := r.all[client]
+	entry.count--
+
+	if r.closing {
+		// Close client if count is 0
+		if entry.count == 0 {
+			if err := entry.client.Close(); err != nil {
+				logger.Warnw(context.Background(), "error-closing-client", log.Fields{"error": err})
+			}
+			delete(r.all, entry.client)
+		}
+		// Notify Close function that a client was returned to the pool
+		r.closingCh <- struct{}{}
+		r.Unlock()
+		return
+	}
+
+	// This entry is now available for use, so
+	// if in full map add it to available and
+	// remove from full
+	if _, ok := r.full[client]; ok {
+		r.available = append(r.available, entry)
+		delete(r.full, client)
+	}
+
+	front := r.waitList.Front()
+	if front != nil {
+		ch := r.waitList.Remove(front)
+		r.Unlock()
+		// need to unblock if someone is waiting
+		ch.(chan struct{}) <- struct{}{}
+		return
+	}
+	r.Unlock()
+}
+
+func (r *roundRobin) Close(ctx context.Context) {
+	r.Lock()
+	r.closing = true
+
+	// Notify anyone waiting for a client to stop waiting
+	close(r.stopCh)
+
+	// Clean-up unused clients
+	for i := 0; i < len(r.available); i++ {
+		// Count 0 means no one is using that client
+		if r.available[i].count == 0 {
+			if err := r.available[i].client.Close(); err != nil {
+				logger.Warnw(ctx, "failure-closing-client", log.Fields{"client": r.available[i].client, "error": err})
+			}
+			// Remove the client from the 'all' map
+			delete(r.all, r.available[i].client)
+		}
+	}
+
+	// Figure out how many clients are in use
+	numberInUse := 0
+	for _, rrEntry := range r.all {
+		numberInUse += rrEntry.count
+	}
+	r.Unlock()
+
+	if numberInUse == 0 {
+		logger.Info(ctx, "no-connection-in-use")
+		return
+	}
+
+	logger.Infow(ctx, "waiting-for-clients-return", log.Fields{"count": numberInUse})
+
+	// Wait for notifications when a client is returned to the pool
+	for {
+		select {
+		case <-r.closingCh:
+			numberInUse--
+			if numberInUse == 0 {
+				logger.Info(ctx, "all-connections-closed")
+				return
+			}
+		case <-ctx.Done():
+			logger.Warnw(ctx, "context-done", log.Fields{"error": ctx.Err()})
+			return
+		}
+	}
+}
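
For completeness, a sketch of the Get/Put discipline on the allocator itself; the endpoint and pool sizes are illustrative:

    // Sketch only: borrow a client, use it, and always return it so waiters unblock.
    func withPooledClient(ctx context.Context) error {
        pool, err := kvstore.NewRoundRobinEtcdClientAllocator(
            []string{"etcd:2379"}, 5*time.Second, 10, 20, log.ErrorLevel)
        if err != nil {
            return err
        }
        defer pool.Close(ctx)

        client, err := pool.Get(ctx) // blocks when capacity is exhausted, until a client is returned
        if err != nil {
            return err
        }
        defer pool.Put(client)

        _, err = client.Get(ctx, "some/key")
        return err
    }
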
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/kvutils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/kvutils.go
similarity index 65%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/kvutils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/kvutils.go
index 70bd977..ca57542 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/kvutils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore/kvutils.go
@@ -16,8 +16,18 @@
 package kvstore
 
 import (
-	"bytes"
+	"context"
 	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
+const (
+	minRetryInterval  = 100
+	maxRetryInterval  = 5000
+	incrementalFactor = 1.2
+	jitter            = 0.2
 )
 
 // ToString converts an interface value to a string.  The interface should either be of
@@ -46,13 +56,23 @@
 	}
 }
 
-// Helper function to verify mostly whether the content of two interface types are the same.  Focus is []byte and
-// string types
-func isEqual(val1 interface{}, val2 interface{}) bool {
-	b1, err := ToByte(val1)
-	b2, er := ToByte(val2)
-	if err == nil && er == nil {
-		return bytes.Equal(b1, b2)
+// backoff waits an amount of time that is proportional to the attempt value.  The wait time is bounded by
+// minRetryInterval and maxRetryInterval.
+func backoff(ctx context.Context, attempt int) error {
+	if attempt == 0 {
+		return nil
 	}
-	return val1 == val2
+	backoff := int(minRetryInterval + incrementalFactor*math.Exp(float64(attempt)))
+	backoff *= 1 + int(jitter*(rand.Float64()*2-1))
+	if backoff > maxRetryInterval {
+		backoff = maxRetryInterval
+	}
+	ticker := time.NewTicker(time.Duration(backoff) * time.Millisecond)
+	defer ticker.Stop()
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-ticker.C:
+	}
+	return nil
 }
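
A worked view of the nominal retry schedule produced by the backoff helper above, using the same constants. Note that the jitter term, int(jitter*(rand.Float64()*2-1)), always truncates to 0 as written, so it does not actually change the delay:

    // Sketch only: reproduces the backoff computation for attempts 1..9.
    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        const (
            minRetryInterval  = 100
            maxRetryInterval  = 5000
            incrementalFactor = 1.2
        )
        for attempt := 1; attempt <= 9; attempt++ {
            d := int(minRetryInterval + incrementalFactor*math.Exp(float64(attempt)))
            if d > maxRetryInterval {
                d = maxRetryInterval
            }
            // Prints roughly: 103, 108, 124, 165, 278, 584, 1415, 3677, 5000 (ms)
            fmt.Printf("attempt %d -> ~%d ms\n", attempt, d)
        }
    }
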
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/common.go
index 489a493..df3e839 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/common.go
@@ -16,7 +16,7 @@
 package events
 
 import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/eventif/events_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/eventif/events_proxy_if.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/eventif/events_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/eventif/events_proxy_if.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/events_proxy.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/events_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/events_proxy.go
index 910fec3..19a4f26 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/events_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/events_proxy.go
@@ -27,9 +27,9 @@
 	"time"
 
 	"github.com/golang/protobuf/ptypes"
-	"github.com/opencord/voltha-lib-go/v4/pkg/events/eventif"
-	"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/events/eventif"
+	"github.com/opencord/voltha-lib-go/v5/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"github.com/opencord/voltha-protos/v4/go/voltha"
 )
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/utils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/events/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/events/utils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/common.go
index fdc93bd..beb0574 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/common.go
@@ -16,7 +16,7 @@
 package flows
 
 import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/flow_utils.go
similarity index 97%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/flow_utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/flow_utils.go
index 98fad49..ff6aaf0 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/flows/flow_utils.go
@@ -26,7 +26,7 @@
 
 	"github.com/cevaris/ordered_map"
 	"github.com/gogo/protobuf/proto"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
 )
 
@@ -1576,3 +1576,33 @@
 	}
 	return b.Bytes()
 }
+
+func GetMeterIdFromWriteMetadata(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+	/*
+		Write metadata instruction value (metadata) is 8 bytes:
+			MS 2 bytes: C Tag
+			Next 2 bytes: Technology Profile Id
+			Next 4 bytes: Port number (uni or nni) or MeterId
+		This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var meterID uint32 = 0
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	logger.Debugw(ctx, "found-metadata-for-egress/uni-port", log.Fields{"metadata": md})
+	if md != 0 {
+		meterID = uint32(md & 0xFFFFFFFF)
+		logger.Debugw(ctx, "found-meterID-in-write-metadata-action", log.Fields{"meterID": meterID})
+	}
+	return meterID
+}
+
+func SetMeterIdToFlow(flow *ofp.OfpFlowStats, meterId uint32) {
+	if flow != nil {
+		for _, instruction := range flow.Instructions {
+			if instruction.Type == uint32(METER_ACTION) {
+				if meterInst := instruction.GetMeter(); meterInst != nil {
+					meterInst.MeterId = meterId
+				}
+			}
+		}
+	}
+}
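
The helpers above rely on the write-metadata layout documented in their comment; a small standalone sketch of that bit layout with a hypothetical metadata value (plain bit arithmetic, no library calls):

    // Sketch only: [ C-Tag (16 bits) | TP ID (16 bits) | port or meter ID (32 bits) ].
    package main

    import "fmt"

    func main() {
        var metadata uint64 = (uint64(257) << 48) | (uint64(64) << 32) | uint64(0x00010001)

        cTag := uint16(metadata >> 48)           // 257
        tpID := uint16(metadata >> 32)           // 64
        meterID := uint32(metadata & 0xFFFFFFFF) // 0x00010001, the lower 32 bits used as the meter ID

        fmt.Printf("ctag=%d tp-id=%d meter-id=0x%08X\n", cTag, tpID, meterID)
    }
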
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/client.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/client.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/client.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/common.go
index 5db364d..f4d7661 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/common.go
@@ -16,7 +16,7 @@
 package kafka
 
 import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/endpoint_manager.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/endpoint_manager.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/endpoint_manager.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/endpoint_manager.go
index 796eb72..962b932 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/endpoint_manager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/endpoint_manager.go
@@ -21,8 +21,8 @@
 	"github.com/buraksezer/consistent"
 	"github.com/cespare/xxhash"
 	"github.com/golang/protobuf/proto"
-	"github.com/opencord/voltha-lib-go/v4/pkg/db"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/db"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"github.com/opencord/voltha-protos/v4/go/voltha"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/kafka_inter_container_library.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/kafka_inter_container_library.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/kafka_inter_container_library.go
index 3af35d7..b149e7d 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/kafka_inter_container_library.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/kafka_inter_container_library.go
@@ -31,7 +31,7 @@
 	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/any"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	"github.com/opentracing/opentracing-go"
 )
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/sarama_client.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/sarama_client.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/sarama_client.go
index cd6d27b..3273470 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/sarama_client.go
@@ -29,7 +29,7 @@
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/ptypes"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 )
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/utils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/kafka/utils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/common.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/common.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/log.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/log.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/log.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/utils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/log/utils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/common.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/common.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/common.go
index d9739af..119d78e 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/common.go
@@ -16,7 +16,7 @@
 package probe
 
 import (
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 )
 
 var logger log.CLogger
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/probe.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/probe.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/probe.go
index f13f257..b66f398 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/probe.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/probe/probe.go
@@ -18,7 +18,7 @@
 import (
 	"context"
 	"fmt"
-	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-lib-go/v5/pkg/log"
 	"net/http"
 	"sync"
 )
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/version/version.go b/vendor/github.com/opencord/voltha-lib-go/v5/pkg/version/version.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v4/pkg/version/version.go
rename to vendor/github.com/opencord/voltha-lib-go/v5/pkg/version/version.go
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go
index 8caf8ad..403d9e6 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go
@@ -10,6 +10,7 @@
 	timestamp "github.com/golang/protobuf/ptypes/timestamp"
 	common "github.com/opencord/voltha-protos/v4/go/common"
 	openflow_13 "github.com/opencord/voltha-protos/v4/go/openflow_13"
+	tech_profile "github.com/opencord/voltha-protos/v4/go/tech_profile"
 	voltha "github.com/opencord/voltha-protos/v4/go/voltha"
 	math "math"
 )
@@ -1131,21 +1132,100 @@
 	return nil
 }
 
-type InterAdapterTechProfileDownloadMessage struct {
-	UniId                uint32   `protobuf:"varint,1,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
-	Path                 string   `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"`
+type InterAdapterTechProfileInstanceRequestMessage struct {
+	TpInstancePath       string   `protobuf:"bytes,1,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
+	ParentDeviceId       string   `protobuf:"bytes,2,opt,name=parent_device_id,json=parentDeviceId,proto3" json:"parent_device_id,omitempty"`
+	ParentPonPort        uint32   `protobuf:"varint,3,opt,name=parent_pon_port,json=parentPonPort,proto3" json:"parent_pon_port,omitempty"`
+	OnuId                uint32   `protobuf:"varint,4,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	UniId                uint32   `protobuf:"varint,5,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
 	XXX_sizecache        int32    `json:"-"`
 }
 
+func (m *InterAdapterTechProfileInstanceRequestMessage) Reset() {
+	*m = InterAdapterTechProfileInstanceRequestMessage{}
+}
+func (m *InterAdapterTechProfileInstanceRequestMessage) String() string {
+	return proto.CompactTextString(m)
+}
+func (*InterAdapterTechProfileInstanceRequestMessage) ProtoMessage() {}
+func (*InterAdapterTechProfileInstanceRequestMessage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_941f0031a549667f, []int{16}
+}
+
+func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.Unmarshal(m, b)
+}
+func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.Marshal(b, m, deterministic)
+}
+func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.Merge(m, src)
+}
+func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_Size() int {
+	return xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.Size(m)
+}
+func (m *InterAdapterTechProfileInstanceRequestMessage) XXX_DiscardUnknown() {
+	xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InterAdapterTechProfileInstanceRequestMessage proto.InternalMessageInfo
+
+func (m *InterAdapterTechProfileInstanceRequestMessage) GetTpInstancePath() string {
+	if m != nil {
+		return m.TpInstancePath
+	}
+	return ""
+}
+
+func (m *InterAdapterTechProfileInstanceRequestMessage) GetParentDeviceId() string {
+	if m != nil {
+		return m.ParentDeviceId
+	}
+	return ""
+}
+
+func (m *InterAdapterTechProfileInstanceRequestMessage) GetParentPonPort() uint32 {
+	if m != nil {
+		return m.ParentPonPort
+	}
+	return 0
+}
+
+func (m *InterAdapterTechProfileInstanceRequestMessage) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *InterAdapterTechProfileInstanceRequestMessage) GetUniId() uint32 {
+	if m != nil {
+		return m.UniId
+	}
+	return 0
+}
+
+type InterAdapterTechProfileDownloadMessage struct {
+	UniId          uint32 `protobuf:"varint,1,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
+	TpInstancePath string `protobuf:"bytes,2,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
+	// Types that are valid to be assigned to TechTpInstance:
+	//	*InterAdapterTechProfileDownloadMessage_TpInstance
+	//	*InterAdapterTechProfileDownloadMessage_EponTpInstance
+	TechTpInstance       isInterAdapterTechProfileDownloadMessage_TechTpInstance `protobuf_oneof:"tech_tp_instance"`
+	XXX_NoUnkeyedLiteral struct{}                                                `json:"-"`
+	XXX_unrecognized     []byte                                                  `json:"-"`
+	XXX_sizecache        int32                                                   `json:"-"`
+}
+
 func (m *InterAdapterTechProfileDownloadMessage) Reset() {
 	*m = InterAdapterTechProfileDownloadMessage{}
 }
 func (m *InterAdapterTechProfileDownloadMessage) String() string { return proto.CompactTextString(m) }
 func (*InterAdapterTechProfileDownloadMessage) ProtoMessage()    {}
 func (*InterAdapterTechProfileDownloadMessage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{16}
+	return fileDescriptor_941f0031a549667f, []int{17}
 }
 
 func (m *InterAdapterTechProfileDownloadMessage) XXX_Unmarshal(b []byte) error {
@@ -1173,16 +1253,63 @@
 	return 0
 }
 
-func (m *InterAdapterTechProfileDownloadMessage) GetPath() string {
+func (m *InterAdapterTechProfileDownloadMessage) GetTpInstancePath() string {
 	if m != nil {
-		return m.Path
+		return m.TpInstancePath
 	}
 	return ""
 }
 
+type isInterAdapterTechProfileDownloadMessage_TechTpInstance interface {
+	isInterAdapterTechProfileDownloadMessage_TechTpInstance()
+}
+
+type InterAdapterTechProfileDownloadMessage_TpInstance struct {
+	TpInstance *tech_profile.TechProfileInstance `protobuf:"bytes,3,opt,name=tp_instance,json=tpInstance,proto3,oneof"`
+}
+
+type InterAdapterTechProfileDownloadMessage_EponTpInstance struct {
+	EponTpInstance *tech_profile.EponTechProfileInstance `protobuf:"bytes,4,opt,name=epon_tp_instance,json=eponTpInstance,proto3,oneof"`
+}
+
+func (*InterAdapterTechProfileDownloadMessage_TpInstance) isInterAdapterTechProfileDownloadMessage_TechTpInstance() {
+}
+
+func (*InterAdapterTechProfileDownloadMessage_EponTpInstance) isInterAdapterTechProfileDownloadMessage_TechTpInstance() {
+}
+
+func (m *InterAdapterTechProfileDownloadMessage) GetTechTpInstance() isInterAdapterTechProfileDownloadMessage_TechTpInstance {
+	if m != nil {
+		return m.TechTpInstance
+	}
+	return nil
+}
+
+func (m *InterAdapterTechProfileDownloadMessage) GetTpInstance() *tech_profile.TechProfileInstance {
+	if x, ok := m.GetTechTpInstance().(*InterAdapterTechProfileDownloadMessage_TpInstance); ok {
+		return x.TpInstance
+	}
+	return nil
+}
+
+func (m *InterAdapterTechProfileDownloadMessage) GetEponTpInstance() *tech_profile.EponTechProfileInstance {
+	if x, ok := m.GetTechTpInstance().(*InterAdapterTechProfileDownloadMessage_EponTpInstance); ok {
+		return x.EponTpInstance
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*InterAdapterTechProfileDownloadMessage) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*InterAdapterTechProfileDownloadMessage_TpInstance)(nil),
+		(*InterAdapterTechProfileDownloadMessage_EponTpInstance)(nil),
+	}
+}
+
 type InterAdapterDeleteGemPortMessage struct {
 	UniId                uint32   `protobuf:"varint,1,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
-	TpPath               string   `protobuf:"bytes,2,opt,name=tp_path,json=tpPath,proto3" json:"tp_path,omitempty"`
+	TpInstancePath       string   `protobuf:"bytes,2,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
 	GemPortId            uint32   `protobuf:"varint,3,opt,name=gem_port_id,json=gemPortId,proto3" json:"gem_port_id,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
@@ -1193,7 +1320,7 @@
 func (m *InterAdapterDeleteGemPortMessage) String() string { return proto.CompactTextString(m) }
 func (*InterAdapterDeleteGemPortMessage) ProtoMessage()    {}
 func (*InterAdapterDeleteGemPortMessage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{17}
+	return fileDescriptor_941f0031a549667f, []int{18}
 }
 
 func (m *InterAdapterDeleteGemPortMessage) XXX_Unmarshal(b []byte) error {
@@ -1221,9 +1348,9 @@
 	return 0
 }
 
-func (m *InterAdapterDeleteGemPortMessage) GetTpPath() string {
+func (m *InterAdapterDeleteGemPortMessage) GetTpInstancePath() string {
 	if m != nil {
-		return m.TpPath
+		return m.TpInstancePath
 	}
 	return ""
 }
@@ -1237,7 +1364,7 @@
 
 type InterAdapterDeleteTcontMessage struct {
 	UniId                uint32   `protobuf:"varint,1,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
-	TpPath               string   `protobuf:"bytes,2,opt,name=tp_path,json=tpPath,proto3" json:"tp_path,omitempty"`
+	TpInstancePath       string   `protobuf:"bytes,2,opt,name=tp_instance_path,json=tpInstancePath,proto3" json:"tp_instance_path,omitempty"`
 	AllocId              uint32   `protobuf:"varint,3,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
@@ -1248,7 +1375,7 @@
 func (m *InterAdapterDeleteTcontMessage) String() string { return proto.CompactTextString(m) }
 func (*InterAdapterDeleteTcontMessage) ProtoMessage()    {}
 func (*InterAdapterDeleteTcontMessage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{18}
+	return fileDescriptor_941f0031a549667f, []int{19}
 }
 
 func (m *InterAdapterDeleteTcontMessage) XXX_Unmarshal(b []byte) error {
@@ -1276,9 +1403,9 @@
 	return 0
 }
 
-func (m *InterAdapterDeleteTcontMessage) GetTpPath() string {
+func (m *InterAdapterDeleteTcontMessage) GetTpInstancePath() string {
 	if m != nil {
-		return m.TpPath
+		return m.TpInstancePath
 	}
 	return ""
 }
@@ -1305,7 +1432,7 @@
 func (m *InterAdapterResponseBody) String() string { return proto.CompactTextString(m) }
 func (*InterAdapterResponseBody) ProtoMessage()    {}
 func (*InterAdapterResponseBody) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{19}
+	return fileDescriptor_941f0031a549667f, []int{20}
 }
 
 func (m *InterAdapterResponseBody) XXX_Unmarshal(b []byte) error {
@@ -1390,7 +1517,7 @@
 func (m *InterAdapterMessage) String() string { return proto.CompactTextString(m) }
 func (*InterAdapterMessage) ProtoMessage()    {}
 func (*InterAdapterMessage) Descriptor() ([]byte, []int) {
-	return fileDescriptor_941f0031a549667f, []int{20}
+	return fileDescriptor_941f0031a549667f, []int{21}
 }
 
 func (m *InterAdapterMessage) XXX_Unmarshal(b []byte) error {
@@ -1445,6 +1572,7 @@
 	proto.RegisterType((*InterAdapterMessageType)(nil), "voltha.InterAdapterMessageType")
 	proto.RegisterType((*InterAdapterHeader)(nil), "voltha.InterAdapterHeader")
 	proto.RegisterType((*InterAdapterOmciMessage)(nil), "voltha.InterAdapterOmciMessage")
+	proto.RegisterType((*InterAdapterTechProfileInstanceRequestMessage)(nil), "voltha.InterAdapterTechProfileInstanceRequestMessage")
 	proto.RegisterType((*InterAdapterTechProfileDownloadMessage)(nil), "voltha.InterAdapterTechProfileDownloadMessage")
 	proto.RegisterType((*InterAdapterDeleteGemPortMessage)(nil), "voltha.InterAdapterDeleteGemPortMessage")
 	proto.RegisterType((*InterAdapterDeleteTcontMessage)(nil), "voltha.InterAdapterDeleteTcontMessage")
@@ -1457,88 +1585,97 @@
 }
 
 var fileDescriptor_941f0031a549667f = []byte{
-	// 1328 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0x44,
-	0x14, 0xae, 0xff, 0xed, 0xe3, 0xc4, 0x75, 0x37, 0x4d, 0xe3, 0x24, 0xfd, 0x09, 0xa2, 0x94, 0xd0,
-	0x82, 0x33, 0xb8, 0x30, 0xc0, 0x15, 0x38, 0xb6, 0xda, 0x68, 0xc6, 0xb1, 0x8d, 0xec, 0xb4, 0x0c,
-	0xc3, 0x8c, 0x46, 0x91, 0x36, 0xb6, 0x26, 0xb2, 0x56, 0x5d, 0xad, 0x53, 0x74, 0xc3, 0x0c, 0x77,
-	0xbc, 0x04, 0x33, 0x5c, 0xf1, 0x0e, 0xbc, 0x06, 0x4f, 0xc4, 0xec, 0x8f, 0x6c, 0xd9, 0x6d, 0xe8,
-	0x50, 0xee, 0x74, 0xce, 0xf7, 0xed, 0x39, 0xda, 0x73, 0xf6, 0x7c, 0xbb, 0xf0, 0xe1, 0x15, 0xf1,
-	0xd9, 0xd4, 0xb6, 0x42, 0x4a, 0x18, 0x89, 0x8e, 0xbc, 0x80, 0x61, 0x6a, 0x39, 0x24, 0x60, 0xb6,
-	0x17, 0x60, 0xda, 0x14, 0x6e, 0x54, 0x94, 0xa4, 0xbd, 0xbd, 0x55, 0xb2, 0x43, 0x66, 0x33, 0x12,
-	0x48, 0xce, 0x3a, 0x26, 0x2d, 0x85, 0xed, 0x4e, 0x08, 0x99, 0xf8, 0xf8, 0x48, 0x58, 0xe7, 0xf3,
-	0x8b, 0x23, 0x3b, 0x88, 0x15, 0xf4, 0x60, 0x75, 0x19, 0x09, 0x71, 0x70, 0xe1, 0x93, 0xd7, 0xd6,
-	0xe7, 0x4f, 0x15, 0x41, 0x5b, 0x25, 0xf8, 0x64, 0xe2, 0x39, 0xb6, 0x6f, 0xb9, 0xf8, 0xca, 0x73,
-	0x70, 0x12, 0x64, 0x3d, 0x3e, 0xf3, 0x66, 0x38, 0x62, 0xf6, 0x2c, 0x94, 0x04, 0x6d, 0x1f, 0x4a,
-	0x23, 0x46, 0xc7, 0x71, 0x88, 0x51, 0x1d, 0x72, 0x57, 0xb6, 0xdf, 0xc8, 0x1c, 0x64, 0x0e, 0x2b,
-	0x26, 0xff, 0xe4, 0xa0, 0x11, 0xb0, 0x75, 0x30, 0x27, 0xc1, 0xbb, 0x50, 0x3e, 0x26, 0xc4, 0x5f,
-	0x47, 0xcb, 0x12, 0xd5, 0xa0, 0x38, 0xb4, 0x9d, 0x4b, 0xcc, 0x50, 0x03, 0x4a, 0xa1, 0x1d, 0xfb,
-	0xc4, 0x76, 0x05, 0xbe, 0x61, 0x26, 0xa6, 0xf6, 0x13, 0x54, 0x74, 0x4a, 0x09, 0xed, 0x10, 0x17,
-	0x6b, 0x03, 0x28, 0x38, 0xc4, 0xc5, 0x11, 0xda, 0x81, 0xad, 0xb3, 0xfe, 0xe8, 0x6c, 0x38, 0x1c,
-	0x98, 0x63, 0xbd, 0x6b, 0x99, 0xfa, 0xf7, 0x67, 0xfa, 0x68, 0x5c, 0xbf, 0x81, 0xee, 0x00, 0x32,
-	0xfa, 0x2f, 0xda, 0x3d, 0xa3, 0x6b, 0x0d, 0xdb, 0x66, 0xfb, 0x54, 0x1f, 0xeb, 0xe6, 0xa8, 0x9e,
-	0x41, 0xdb, 0x70, 0xab, 0xab, 0xb7, 0xbb, 0x3d, 0xa3, 0xaf, 0x5b, 0xfa, 0x0f, 0x1d, 0x5d, 0xef,
-	0xea, 0xdd, 0x7a, 0x56, 0xeb, 0x41, 0x41, 0x44, 0x47, 0x4f, 0x20, 0xcf, 0x23, 0x8b, 0xec, 0xb5,
-	0xd6, 0x4e, 0x53, 0x35, 0x60, 0x91, 0xba, 0x29, 0xf2, 0x9a, 0x82, 0x84, 0xee, 0x40, 0x91, 0x62,
-	0x3b, 0x22, 0x41, 0x23, 0x2b, 0xea, 0xa0, 0x2c, 0xed, 0xef, 0x0c, 0x14, 0x4f, 0xb0, 0xed, 0x62,
-	0x8a, 0x6a, 0x90, 0xf5, 0x5c, 0x55, 0xa6, 0xac, 0xe7, 0xa2, 0x8f, 0x21, 0xcf, 0xe2, 0x10, 0x8b,
-	0x05, 0xb5, 0xd6, 0x56, 0x12, 0xff, 0x14, 0x47, 0x91, 0x3d, 0xc1, 0xbc, 0x3e, 0xa6, 0x20, 0xa0,
-	0x7b, 0x00, 0x17, 0x94, 0xcc, 0x2c, 0x46, 0x42, 0xcf, 0x69, 0xe4, 0x44, 0x80, 0x0a, 0xf7, 0x8c,
-	0xb9, 0x03, 0xed, 0x42, 0x99, 0x11, 0x05, 0xe6, 0x05, 0x58, 0x62, 0x44, 0x42, 0xfb, 0x50, 0xb9,
-	0xc4, 0xb1, 0xc2, 0x0a, 0x02, 0x2b, 0x5f, 0xe2, 0x58, 0x82, 0x5f, 0x43, 0x65, 0xd1, 0xd5, 0x46,
-	0xf1, 0x20, 0x73, 0x58, 0x6d, 0xed, 0x35, 0x65, 0xdf, 0x9b, 0x49, 0xdf, 0x9b, 0xe3, 0x84, 0x61,
-	0x2e, 0xc9, 0xda, 0x09, 0x94, 0xdb, 0x74, 0x32, 0x9f, 0xe1, 0x80, 0xf1, 0x16, 0x5e, 0xe2, 0x38,
-	0xe9, 0xfe, 0x25, 0x8e, 0xd1, 0x63, 0x28, 0x5c, 0xd9, 0xfe, 0x5c, 0x6e, 0xac, 0xda, 0xba, 0xfd,
-	0x46, 0xcc, 0x76, 0x10, 0x9b, 0x92, 0xa2, 0x79, 0xb0, 0x6d, 0xf0, 0x01, 0xe9, 0x24, 0xf3, 0xa1,
-	0x76, 0x8f, 0x1e, 0x41, 0x71, 0x2a, 0xca, 0x26, 0x22, 0x57, 0x5b, 0xb5, 0xa4, 0x3c, 0xb2, 0x98,
-	0xa6, 0x42, 0xd1, 0x21, 0xe4, 0xcf, 0x89, 0x1b, 0xff, 0x6b, 0x2e, 0xc1, 0xd0, 0xfe, 0xcc, 0xc0,
-	0xee, 0x6a, 0x2e, 0x13, 0xbf, 0x9a, 0xe3, 0x88, 0x1d, 0x13, 0x37, 0xe6, 0xdb, 0xa0, 0xa1, 0xa3,
-	0x9a, 0xc7, 0x3f, 0xd1, 0x43, 0xc8, 0xdb, 0x74, 0x12, 0x35, 0x72, 0x07, 0xb9, 0xc3, 0x6a, 0xab,
-	0x9e, 0xe4, 0x4f, 0x36, 0x6e, 0x0a, 0x14, 0x3d, 0x81, 0x5b, 0x14, 0x47, 0x21, 0x09, 0x22, 0x6c,
-	0x51, 0xfc, 0x6a, 0xee, 0x51, 0xec, 0x8a, 0x2e, 0x94, 0xcd, 0x7a, 0x02, 0x98, 0xca, 0x8f, 0x1e,
-	0x42, 0x8d, 0xe2, 0xd0, 0xe7, 0x0d, 0x59, 0xe9, 0xc9, 0x86, 0xf0, 0x8e, 0x65, 0xd3, 0x34, 0x17,
-	0xf6, 0xd6, 0xff, 0x53, 0xc6, 0x11, 0x3f, 0xda, 0x80, 0x52, 0x34, 0x77, 0x1c, 0x1c, 0x45, 0x6a,
-	0x6c, 0x12, 0x13, 0x7d, 0xca, 0x8f, 0x60, 0x34, 0xf7, 0x99, 0x38, 0x22, 0xd7, 0x15, 0x43, 0x71,
-	0xb4, 0xdf, 0x32, 0x50, 0x1f, 0xbd, 0xf6, 0x98, 0x33, 0xed, 0xd8, 0xa1, 0x7d, 0xee, 0xf9, 0x1e,
-	0x8b, 0xd1, 0x27, 0x90, 0x77, 0x71, 0xe4, 0xa8, 0x9a, 0x6f, 0x37, 0xd3, 0xe2, 0x41, 0x2e, 0x42,
-	0x8b, 0x83, 0xa6, 0xa0, 0x20, 0x03, 0x6e, 0x46, 0x62, 0xb9, 0x75, 0x81, 0x6d, 0x36, 0xa7, 0x38,
-	0x52, 0x3d, 0x38, 0x78, 0x63, 0xd5, 0x1a, 0xcf, 0xac, 0x49, 0xc7, 0x33, 0x65, 0x6b, 0xbf, 0x40,
-	0xbd, 0x2b, 0xc4, 0xa7, 0xeb, 0x45, 0x0e, 0xb9, 0xc2, 0xbc, 0x54, 0xeb, 0xc3, 0xb2, 0x0f, 0x95,
-	0xd0, 0xa6, 0x38, 0x60, 0x96, 0xe7, 0xaa, 0x2e, 0x95, 0xa5, 0xc3, 0x70, 0xd1, 0x03, 0xa8, 0x4a,
-	0xf5, 0xb2, 0xc4, 0x40, 0xc9, 0x09, 0x01, 0xe9, 0x12, 0x3a, 0x73, 0x17, 0x2a, 0xe1, 0xfc, 0xdc,
-	0xf7, 0xa2, 0x29, 0xa6, 0x6a, 0x46, 0x96, 0x0e, 0xed, 0xf7, 0x2c, 0xec, 0x88, 0x8a, 0xb7, 0x5d,
-	0x3b, 0x64, 0x8b, 0x33, 0xc8, 0x57, 0x6a, 0xbf, 0x66, 0xa1, 0xc0, 0x3f, 0x22, 0x54, 0x87, 0x8d,
-	0x67, 0xbd, 0xc1, 0xcb, 0x94, 0xb0, 0xdc, 0x82, 0x4d, 0xe5, 0x19, 0x0d, 0x07, 0xfd, 0x91, 0x5e,
-	0xcf, 0x70, 0xd2, 0xe0, 0xb4, 0x63, 0x2c, 0x48, 0x59, 0x4e, 0x52, 0x1e, 0x45, 0xca, 0xa1, 0x2d,
-	0xb8, 0x79, 0xaa, 0x8f, 0x4d, 0xa3, 0x33, 0x5a, 0xf0, 0xf2, 0xe8, 0x36, 0xd4, 0x97, 0x4e, 0x45,
-	0x2d, 0x70, 0xea, 0xa0, 0x7f, 0x66, 0x19, 0xfd, 0xa5, 0xa0, 0x15, 0x39, 0x75, 0xe9, 0x54, 0xd4,
-	0x12, 0xfa, 0x00, 0xee, 0x8d, 0xf5, 0xce, 0x89, 0x35, 0x34, 0x07, 0xcf, 0x8c, 0x9e, 0x6e, 0x75,
-	0x07, 0x2f, 0xfb, 0xbd, 0x41, 0x7b, 0xb9, 0xb0, 0x8c, 0xf6, 0x61, 0xa7, 0xab, 0xf7, 0xf4, 0xb1,
-	0x6e, 0x3d, 0xd7, 0x4f, 0x2d, 0x2e, 0x94, 0x0b, 0xb0, 0x82, 0x1a, 0x70, 0x5b, 0x81, 0xe3, 0xce,
-	0xa0, 0xbf, 0x44, 0x80, 0xd7, 0x07, 0xa5, 0xeb, 0x73, 0x8d, 0x9e, 0x7d, 0xb3, 0xa2, 0x67, 0x1f,
-	0x25, 0x03, 0x73, 0x4d, 0x65, 0x9b, 0xa2, 0xaa, 0xff, 0x5b, 0xe1, 0x0e, 0x60, 0x83, 0x11, 0x75,
-	0x77, 0xf1, 0xa3, 0x21, 0x07, 0x0a, 0x18, 0x91, 0x27, 0xca, 0x70, 0xd1, 0x23, 0xb8, 0x19, 0x52,
-	0xf2, 0x73, 0x9c, 0x22, 0x15, 0x05, 0x69, 0x53, 0xb8, 0x17, 0xbc, 0x15, 0x39, 0x2c, 0xfd, 0x17,
-	0x39, 0xfc, 0x2b, 0xb3, 0x7a, 0x7e, 0x06, 0x33, 0xc7, 0x4b, 0x74, 0xac, 0x01, 0xa5, 0x99, 0xfc,
-	0x4c, 0x6e, 0x31, 0x65, 0xa2, 0x63, 0xa8, 0x39, 0x24, 0x08, 0xb0, 0xc3, 0xac, 0x88, 0xd9, 0x6c,
-	0x1e, 0xa9, 0xc2, 0xed, 0x37, 0xd5, 0x2b, 0xa0, 0x23, 0xd1, 0x91, 0x00, 0x55, 0xb9, 0x36, 0x9d,
-	0xb4, 0x13, 0x7d, 0x07, 0x72, 0x13, 0x96, 0xed, 0xba, 0x94, 0x4b, 0x82, 0x9c, 0xfc, 0xfd, 0xa4,
-	0xf6, 0x72, 0x73, 0xcd, 0x21, 0xe7, 0xb4, 0x25, 0xc5, 0xdc, 0x08, 0x53, 0x96, 0x36, 0x82, 0x47,
-	0xe9, 0x5f, 0x1f, 0x63, 0x67, 0x3a, 0xa4, 0xe4, 0xc2, 0xf3, 0x71, 0x97, 0xbc, 0x0e, 0xf8, 0x75,
-	0x9b, 0xec, 0x64, 0x1b, 0x8a, 0xf3, 0xc0, 0xb3, 0x54, 0xcb, 0x37, 0xcd, 0xc2, 0x3c, 0xf0, 0x0c,
-	0x17, 0x21, 0xc8, 0x87, 0x36, 0x9b, 0xaa, 0x99, 0x14, 0xdf, 0x1a, 0x85, 0x83, 0x74, 0xd0, 0x2e,
-	0xf6, 0x31, 0xc3, 0xcf, 0xf1, 0x6c, 0x48, 0x28, 0x7b, 0x47, 0xb8, 0x1d, 0x28, 0xb1, 0xd0, 0x4a,
-	0x45, 0x2c, 0xb2, 0x70, 0x68, 0xb3, 0x29, 0xba, 0x0f, 0xd5, 0x09, 0x9e, 0x59, 0x21, 0xa1, 0x42,
-	0x02, 0x72, 0x62, 0x51, 0x65, 0x22, 0x83, 0x1a, 0xae, 0x76, 0x09, 0xf7, 0xdf, 0xcc, 0x39, 0xe6,
-	0xef, 0xae, 0xf7, 0xcd, 0xb8, 0x0b, 0x65, 0xdb, 0xf7, 0x89, 0xb3, 0x4c, 0x57, 0x12, 0xb6, 0xe1,
-	0x6a, 0x7f, 0x64, 0xa0, 0x91, 0xce, 0xb6, 0xa2, 0xd0, 0x77, 0xa0, 0xa8, 0x1a, 0x2a, 0x05, 0x5a,
-	0x59, 0xe8, 0xf1, 0xbb, 0xaf, 0xaa, 0x93, 0x1b, 0xf2, 0xb2, 0x42, 0x5f, 0x42, 0x9e, 0xcc, 0x1c,
-	0x4f, 0xf5, 0xf3, 0xc1, 0xdb, 0x66, 0x29, 0x75, 0xca, 0xf8, 0x32, 0x4e, 0x3f, 0xae, 0x2c, 0xde,
-	0x4c, 0x5a, 0x04, 0x5b, 0x6f, 0x99, 0x3c, 0xd4, 0x5a, 0xbb, 0x57, 0xf7, 0xde, 0x16, 0xfa, 0x7d,
-	0xef, 0xd8, 0xc7, 0xdf, 0x42, 0x35, 0x35, 0xe2, 0xa8, 0x0a, 0xa5, 0xa5, 0x5a, 0x6e, 0x40, 0x39,
-	0x25, 0x94, 0xe2, 0xf1, 0xf5, 0xc2, 0xe8, 0xe8, 0x56, 0xd7, 0x18, 0x75, 0x06, 0x2f, 0x74, 0x93,
-	0x3f, 0xbe, 0x8e, 0xfb, 0xb0, 0x45, 0xe8, 0x44, 0xdc, 0x20, 0x0e, 0xa1, 0xae, 0xfa, 0xb9, 0x1f,
-	0xbf, 0x9a, 0x78, 0x6c, 0x3a, 0x3f, 0xe7, 0x93, 0x71, 0x94, 0x60, 0xea, 0x41, 0xfc, 0x59, 0xf2,
-	0x3c, 0xfe, 0xe2, 0x68, 0x42, 0xd6, 0x5f, 0xdb, 0xc3, 0x1b, 0xc3, 0xcc, 0x30, 0x7f, 0x5e, 0x14,
-	0x9c, 0xa7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x63, 0xbd, 0xf4, 0x0f, 0x9b, 0x0b, 0x00, 0x00,
+	// 1468 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xdd, 0x72, 0xdb, 0x44,
+	0x14, 0x8e, 0x1d, 0xff, 0x1e, 0x27, 0xae, 0xbb, 0x69, 0x1a, 0x27, 0xe9, 0x4f, 0x2a, 0xda, 0x12,
+	0x5a, 0xea, 0x0c, 0x29, 0x0c, 0x70, 0x05, 0x8e, 0xad, 0x36, 0x9a, 0x49, 0x6c, 0x55, 0x56, 0x5a,
+	0x86, 0x61, 0x46, 0xa3, 0x48, 0x1b, 0x5b, 0x13, 0x59, 0xab, 0x4a, 0xab, 0x14, 0xcf, 0x30, 0xcc,
+	0x30, 0xdc, 0xf0, 0x04, 0xdc, 0x31, 0xc3, 0x15, 0xef, 0xc0, 0x6b, 0x70, 0xc7, 0xdb, 0x30, 0xfb,
+	0x23, 0x5b, 0x76, 0x13, 0x18, 0xa0, 0x77, 0xda, 0xf3, 0x7d, 0xfb, 0xed, 0xee, 0x39, 0x7b, 0xce,
+	0x1e, 0xc1, 0x7b, 0x17, 0xc4, 0xa7, 0x23, 0xdb, 0x0a, 0x23, 0x42, 0x49, 0xbc, 0xe7, 0x05, 0x14,
+	0x47, 0x96, 0x43, 0x02, 0x6a, 0x7b, 0x01, 0x8e, 0x5a, 0xdc, 0x8c, 0x4a, 0x82, 0xb4, 0xb5, 0x35,
+	0x4f, 0x76, 0xc8, 0x78, 0x4c, 0x02, 0xc1, 0x59, 0xc4, 0xc4, 0x48, 0x62, 0x9b, 0x43, 0x42, 0x86,
+	0x3e, 0xde, 0xe3, 0xa3, 0xd3, 0xe4, 0x6c, 0xcf, 0x0e, 0x26, 0x12, 0xba, 0x3b, 0x3f, 0x8d, 0x84,
+	0x38, 0x38, 0xf3, 0xc9, 0x1b, 0xeb, 0xa3, 0xa7, 0x92, 0xa0, 0xcc, 0x13, 0x7c, 0x32, 0xf4, 0x1c,
+	0xdb, 0xb7, 0x5c, 0x7c, 0xe1, 0x39, 0x38, 0x15, 0x59, 0xd4, 0xa7, 0xde, 0x18, 0xc7, 0xd4, 0x1e,
+	0x87, 0x92, 0xb0, 0x33, 0x2f, 0x42, 0xb1, 0x33, 0x62, 0xdf, 0x67, 0x9e, 0x2f, 0x25, 0x94, 0x6d,
+	0x28, 0x0f, 0x68, 0x64, 0x4e, 0x42, 0x8c, 0x1a, 0xb0, 0x7c, 0x61, 0xfb, 0xcd, 0xdc, 0x4e, 0x6e,
+	0xb7, 0x6a, 0xb0, 0x4f, 0x06, 0x6a, 0x01, 0x5d, 0x04, 0x97, 0x05, 0x78, 0x0b, 0x2a, 0x07, 0x84,
+	0xf8, 0x8b, 0x68, 0x45, 0xa0, 0x0a, 0x94, 0x74, 0xdb, 0x39, 0xc7, 0x14, 0x35, 0xa1, 0x1c, 0xda,
+	0x13, 0x9f, 0xd8, 0x2e, 0xc7, 0x57, 0x8c, 0x74, 0xa8, 0x7c, 0x03, 0x55, 0x35, 0x8a, 0x48, 0xd4,
+	0x21, 0x2e, 0x56, 0xfa, 0x50, 0x74, 0x88, 0x8b, 0x63, 0xb4, 0x01, 0x6b, 0x27, 0xbd, 0xc1, 0x89,
+	0xae, 0xf7, 0x0d, 0x53, 0xed, 0x5a, 0x86, 0xfa, 0xe2, 0x44, 0x1d, 0x98, 0x8d, 0x25, 0x74, 0x13,
+	0x90, 0xd6, 0x7b, 0xd9, 0x3e, 0xd2, 0xba, 0x96, 0xde, 0x36, 0xda, 0xc7, 0xaa, 0xa9, 0x1a, 0x83,
+	0x46, 0x0e, 0xad, 0xc3, 0xf5, 0xae, 0xda, 0xee, 0x1e, 0x69, 0x3d, 0xd5, 0x52, 0xbf, 0xea, 0xa8,
+	0x6a, 0x57, 0xed, 0x36, 0xf2, 0xca, 0x11, 0x14, 0xb9, 0x3a, 0x7a, 0x0c, 0x05, 0xa6, 0xcc, 0x57,
+	0xaf, 0xef, 0x6f, 0xb4, 0x64, 0x88, 0xa6, 0x4b, 0xb7, 0xf8, 0xba, 0x06, 0x27, 0xa1, 0x9b, 0x50,
+	0x8a, 0xb0, 0x1d, 0x93, 0xa0, 0x99, 0xe7, 0x7e, 0x90, 0x23, 0xe5, 0x8f, 0x1c, 0x94, 0x0e, 0xb1,
+	0xed, 0xe2, 0x08, 0xd5, 0x21, 0xef, 0xb9, 0xd2, 0x4d, 0x79, 0xcf, 0x45, 0xef, 0x43, 0x81, 0x4e,
+	0x42, 0xcc, 0x27, 0xd4, 0xf7, 0xd7, 0x52, 0xfd, 0x63, 0x1c, 0xc7, 0xf6, 0x10, 0x33, 0xff, 0x18,
+	0x9c, 0x80, 0x6e, 0x03, 0x9c, 0x45, 0x64, 0x6c, 0x51, 0x12, 0x7a, 0x4e, 0x73, 0x99, 0x0b, 0x54,
+	0x99, 0xc5, 0x64, 0x06, 0xb4, 0x09, 0x15, 0x4a, 0x24, 0x58, 0xe0, 0x60, 0x99, 0x12, 0x01, 0x6d,
+	0x43, 0xf5, 0x1c, 0x4f, 0x24, 0x56, 0xe4, 0x58, 0xe5, 0x1c, 0x4f, 0x04, 0xf8, 0x19, 0x54, 0xa7,
+	0x71, 0x6f, 0x96, 0x76, 0x72, 0xbb, 0xb5, 0xfd, 0xad, 0x96, 0xb8, 0x19, 0xad, 0xf4, 0x66, 0xb4,
+	0xcc, 0x94, 0x61, 0xcc, 0xc8, 0xca, 0x21, 0x54, 0xda, 0xd1, 0x30, 0x19, 0xe3, 0x80, 0xb2, 0x10,
+	0x9e, 0xe3, 0x49, 0x1a, 0xfd, 0x73, 0x3c, 0x41, 0x8f, 0xa0, 0x78, 0x61, 0xfb, 0x89, 0x38, 0x58,
+	0x6d, 0xff, 0xc6, 0x5b, 0x9a, 0xed, 0x60, 0x62, 0x08, 0x8a, 0xe2, 0xc1, 0xba, 0xc6, 0x52, 0xa8,
+	0x93, 0x66, 0x90, 0x3c, 0x3d, 0x7a, 0x08, 0xa5, 0x11, 0x77, 0x1b, 0x57, 0xae, 0xed, 0xd7, 0x53,
+	0xf7, 0x08, 0x67, 0x1a, 0x12, 0x45, 0xbb, 0x50, 0x38, 0x25, 0xee, 0xe4, 0x6f, 0xd7, 0xe2, 0x0c,
+	0xe5, 0xb7, 0x1c, 0x6c, 0xce, 0xaf, 0x65, 0xe0, 0xd7, 0x09, 0x8e, 0xe9, 0x01, 0x71, 0x27, 0xec,
+	0x18, 0x51, 0xe8, 0xc8, 0xe0, 0xb1, 0x4f, 0x74, 0x1f, 0x0a, 0x76, 0x34, 0x8c, 0x9b, 0xcb, 0x3b,
+	0xcb, 0xbb, 0xb5, 0xfd, 0x46, 0xba, 0x7e, 0x7a, 0x70, 0x83, 0xa3, 0xe8, 0x31, 0x5c, 0x8f, 0x70,
+	0x1c, 0x92, 0x20, 0xc6, 0x56, 0x84, 0x5f, 0x27, 0x5e, 0x84, 0x5d, 0x1e, 0x85, 0x8a, 0xd1, 0x48,
+	0x01, 0x43, 0xda, 0xd1, 0x7d, 0xa8, 0x47, 0x38, 0xf4, 0x59, 0x40, 0xe6, 0x62, 0xb2, 0xc2, 0xad,
+	0xa6, 0x08, 0x9a, 0xe2, 0xc2, 0xd6, 0xe2, 0x3e, 0x85, 0x0e, 0xdf, 0x68, 0x13, 0xca, 0x71, 0xe2,
+	0x38, 0x38, 0x8e, 0x65, 0xda, 0xa4, 0x43, 0xf4, 0x21, 0xbb, 0x82, 0x71, 0xe2, 0x53, 0x7e, 0x45,
+	0xae, 0x72, 0x86, 0xe4, 0x28, 0x3f, 0xe5, 0xa0, 0x31, 0x78, 0xe3, 0x51, 0x67, 0xd4, 0xb1, 0x43,
+	0xfb, 0xd4, 0xf3, 0x3d, 0x3a, 0x41, 0x1f, 0x40, 0xc1, 0xc5, 0xb1, 0x23, 0x7d, 0xbe, 0xde, 0xca,
+	0x96, 0x17, 0x72, 0x16, 0x5a, 0x0c, 0x34, 0x38, 0x05, 0x69, 0x70, 0x2d, 0xe6, 0xd3, 0xad, 0x33,
+	0x6c, 0xd3, 0x24, 0xc2, 0xb1, 0x8c, 0xc1, 0xce, 0x5b, 0xb3, 0x16, 0x78, 0x46, 0x5d, 0x18, 0x9e,
+	0xc9, 0xb1, 0xf2, 0x3d, 0x34, 0xba, 0xbc, 0x3c, 0x75, 0xbd, 0xd8, 0x21, 0x17, 0x98, 0xb9, 0x6a,
+	0x31, 0x59, 0xb6, 0xa1, 0x1a, 0xda, 0x11, 0x0e, 0xa8, 0xe5, 0xb9, 0x32, 0x4a, 0x15, 0x61, 0xd0,
+	0x5c, 0x74, 0x17, 0x6a, 0xa2, 0xbe, 0x59, 0x3c, 0xa1, 0x44, 0x86, 0x80, 0x30, 0xf1, 0x3a, 0x73,
+	0x0b, 0xaa, 0x61, 0x72, 0xea, 0x7b, 0xf1, 0x08, 0x47, 0x32, 0x47, 0x66, 0x06, 0xe5, 0x97, 0x3c,
+	0x6c, 0x70, 0x8f, 0xb7, 0x5d, 0x3b, 0xa4, 0xd3, 0x3b, 0xc8, 0x66, 0x2a, 0x3f, 0xe4, 0xa1, 0xc8,
+	0x3e, 0x62, 0xd4, 0x80, 0x95, 0x67, 0x47, 0xfd, 0x57, 0x99, 0xc2, 0x72, 0x1d, 0x56, 0xa5, 0x65,
+	0xa0, 0xf7, 0x7b, 0x03, 0xb5, 0x91, 0x63, 0xa4, 0xfe, 0x71, 0x47, 0x9b, 0x92, 0xf2, 0x8c, 0x24,
+	0x2d, 0x92, 0xb4, 0x8c, 0xd6, 0xe0, 0xda, 0xb1, 0x6a, 0x1a, 0x5a, 0x67, 0x30, 0xe5, 0x15, 0xd0,
+	0x0d, 0x68, 0xcc, 0x8c, 0x92, 0x5a, 0x64, 0xd4, 0x7e, 0xef, 0xc4, 0xd2, 0x7a, 0xb3, 0x82, 0x56,
+	0x62, 0xd4, 0x99, 0x51, 0x52, 0xcb, 0xe8, 0x1e, 0xdc, 0x36, 0xd5, 0xce, 0xa1, 0xa5, 0x1b, 0xfd,
+	0x67, 0xda, 0x91, 0x6a, 0x75, 0xfb, 0xaf, 0x7a, 0x47, 0xfd, 0xf6, 0x6c, 0x62, 0x05, 0x6d, 0xc3,
+	0x46, 0x57, 0x3d, 0x52, 0x4d, 0xd5, 0x7a, 0xae, 0x1e, 0x5b, 0xac, 0x50, 0x4e, 0xc1, 0x2a, 0x6a,
+	0xc2, 0x0d, 0x09, 0x9a, 0x9d, 0x7e, 0x6f, 0x86, 0x00, 0xf3, 0x0f, 0xca, 0xfa, 0xe7, 0x8a, 0x7a,
+	0xf6, 0xf9, 0x5c, 0x3d, 0x7b, 0x90, 0x26, 0xcc, 0x15, 0x9e, 0x6d, 0x71, 0xaf, 0xfe, 0xef, 0x0a,
+	0xb7, 0x03, 0x2b, 0x94, 0xc8, 0xd7, 0x8d, 0x5d, 0x0d, 0x91, 0x50, 0x40, 0x89, 0xb8, 0x51, 0x9a,
+	0x8b, 0x1e, 0xc2, 0xb5, 0x30, 0x22, 0xdf, 0x4e, 0x32, 0xa4, 0x12, 0x27, 0xad, 0x72, 0xf3, 0x94,
+	0x37, 0x57, 0x0e, 0xcb, 0xff, 0xa6, 0x1c, 0xfe, 0x9e, 0x9b, 0xbf, 0x3f, 0xfd, 0xb1, 0xe3, 0xa5,
+	0x75, 0xac, 0x09, 0xe5, 0xb1, 0xf8, 0x4c, 0x5f, 0x31, 0x39, 0x44, 0x07, 0x50, 0x77, 0x48, 0x10,
+	0x60, 0x87, 0x5a, 0x31, 0xb5, 0x69, 0x12, 0x4b, 0xc7, 0x6d, 0xb7, 0x64, 0x9f, 0xd0, 0x11, 0xe8,
+	0x80, 0x83, 0xd2, 0x5d, 0xab, 0x4e, 0xd6, 0x88, 0xbe, 0x04, 0x71, 0x08, 0xcb, 0x76, 0xdd, 0x88,
+	0x95, 0x04, 0x91, 0xf9, 0xdb, 0xa9, 0xef, 0xc5, 0xe1, 0x5a, 0x3a, 0xe3, 0xb4, 0x05, 0xc5, 0x58,
+	0x09, 0x33, 0x23, 0xe5, 0xcf, 0x1c, 0x3c, 0xc9, 0xee, 0xdd, 0xc4, 0xce, 0x48, 0x17, 0x2f, 0xbd,
+	0x16, 0xc4, 0xd4, 0x0e, 0x1c, 0x2c, 0xcb, 0x64, 0x7a, 0xa2, 0x5d, 0x68, 0xd0, 0xd0, 0xf2, 0x24,
+	0x68, 0x85, 0x36, 0x1d, 0xc9, 0x4b, 0x50, 0xa7, 0x61, 0x3a, 0x47, 0xb7, 0xe9, 0x88, 0x31, 0x65,
+	0xce, 0xce, 0x5c, 0x2f, 0x52, 0xb7, 0x2e, 0xec, 0x73, 0x31, 0x12, 0xcc, 0x90, 0x04, 0x56, 0x48,
+	0x22, 0x51, 0xc3, 0x56, 0x8d, 0x55, 0x61, 0xd6, 0x49, 0xa0, 0x93, 0x88, 0xa2, 0x75, 0x28, 0x91,
+	0x20, 0x61, 0x3a, 0x05, 0x0e, 0x17, 0x49, 0x90, 0x68, 0x2e, 0x33, 0x27, 0x81, 0x97, 0x86, 0x7f,
+	0xd5, 0x28, 0x26, 0x81, 0xa7, 0xb9, 0xca, 0xcf, 0x79, 0x78, 0x78, 0xc5, 0xd9, 0xba, 0xe4, 0x4d,
+	0xc0, 0x7a, 0x89, 0xf4, 0x50, 0x33, 0x85, 0x5c, 0x46, 0xe1, 0xd2, 0xb3, 0xe6, 0x2f, 0x3d, 0x6b,
+	0x17, 0x6a, 0x19, 0xa6, 0x8c, 0xc3, 0xbd, 0xd6, 0x5c, 0xe7, 0x74, 0x89, 0x6f, 0x0f, 0x97, 0x0c,
+	0x98, 0x29, 0xa1, 0x17, 0xd0, 0xc0, 0xcc, 0x03, 0x59, 0xa9, 0x02, 0x97, 0x7a, 0x30, 0x2f, 0xa5,
+	0x86, 0x24, 0xb8, 0x5c, 0xae, 0xce, 0x04, 0xcc, 0xa9, 0xe4, 0x01, 0x82, 0x06, 0x9f, 0x99, 0x91,
+	0x54, 0x7e, 0xcc, 0xc1, 0x4e, 0xd6, 0x31, 0x5d, 0xec, 0x63, 0x8a, 0x9f, 0xe3, 0x31, 0x73, 0xf2,
+	0x3b, 0x73, 0xc9, 0x1d, 0xa8, 0x0d, 0xf1, 0x98, 0x47, 0x93, 0xa9, 0x88, 0x80, 0x56, 0x87, 0x62,
+	0x15, 0xcd, 0x55, 0xbe, 0x83, 0x3b, 0x6f, 0x6f, 0xc2, 0x64, 0xbd, 0xf4, 0x3b, 0xdb, 0xc2, 0x26,
+	0x54, 0x6c, 0xdf, 0x27, 0xce, 0x6c, 0xfd, 0x32, 0x1f, 0x6b, 0xae, 0xf2, 0x6b, 0x0e, 0x9a, 0xd9,
+	0xe5, 0xe7, 0x1e, 0xd9, 0x9b, 0x50, 0x92, 0x39, 0x29, 0xde, 0x58, 0x39, 0x42, 0x8f, 0xfe, 0xb9,
+	0xdb, 0x38, 0x5c, 0x12, 0xfd, 0x06, 0xfa, 0x04, 0x0a, 0x64, 0xec, 0x78, 0xf2, 0x2a, 0xdc, 0xbd,
+	0xac, 0x1c, 0x66, 0x0a, 0x05, 0x9b, 0xc6, 0xe8, 0x07, 0xd5, 0x69, 0xdb, 0xab, 0xc4, 0xb0, 0x76,
+	0x49, 0xf1, 0x44, 0xfb, 0x0b, 0xad, 0xd1, 0xd6, 0x65, 0xd2, 0xff, 0xb5, 0x4d, 0x7a, 0xf4, 0x05,
+	0xd4, 0x32, 0x55, 0x1a, 0xd5, 0xa0, 0x3c, 0x7b, 0xf0, 0x56, 0xa0, 0x92, 0x79, 0xeb, 0x78, 0xff,
+	0xfc, 0x52, 0xeb, 0xa8, 0x56, 0x57, 0x1b, 0x74, 0xfa, 0x2f, 0x55, 0x83, 0xf5, 0xcf, 0x07, 0x3d,
+	0x58, 0x23, 0xd1, 0x90, 0x37, 0x01, 0x0e, 0x89, 0x5c, 0xb9, 0xb9, 0xaf, 0x3f, 0x1d, 0x7a, 0x74,
+	0x94, 0x9c, 0xb2, 0xe2, 0xb6, 0x97, 0x62, 0xf2, 0xaf, 0xe7, 0x49, 0xfa, 0x0f, 0xf4, 0xf1, 0xde,
+	0x90, 0x2c, 0xfe, 0x52, 0xe9, 0x4b, 0x7a, 0x4e, 0x2f, 0x9c, 0x96, 0x38, 0xe7, 0xe9, 0x5f, 0x01,
+	0x00, 0x00, 0xff, 0xff, 0xb3, 0x9d, 0x9e, 0x3e, 0x80, 0x0d, 0x00, 0x00,
 }
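
Note on the regenerated inter_container bindings above: the new InterAdapterTechProfileInstanceRequestMessage lets the ONU adapter ask the OLT adapter for a cached TP instance (keyed by tp_instance_path, parent device, PON port, ONU and UNI ids) instead of reading it from etcd, and InterAdapterTechProfileDownloadMessage now carries the instance itself through the tech_tp_instance oneof (TpInstance for GPON/XGS-PON, EponTpInstance for EPON). A minimal sketch of how an adapter might populate these messages, using only the generated types shown in this diff; the helper names below are illustrative and are not part of the adapter code:

	package example

	import (
		ic "github.com/opencord/voltha-protos/v4/go/inter_container"
		tp "github.com/opencord/voltha-protos/v4/go/tech_profile"
	)

	// buildTpInstanceRequest (illustrative) assembles the request the ONU adapter
	// could send on reconcile to fetch a TP instance from the OLT adapter's cache.
	func buildTpInstanceRequest(tpPath, parentDeviceID string, ponPort, onuID, uniID uint32) *ic.InterAdapterTechProfileInstanceRequestMessage {
		return &ic.InterAdapterTechProfileInstanceRequestMessage{
			TpInstancePath: tpPath,
			ParentDeviceId: parentDeviceID,
			ParentPonPort:  ponPort,
			OnuId:          onuID,
			UniId:          uniID,
		}
	}

	// wrapTpInstance (illustrative) shows the download message carrying a GPON-family
	// instance in the new tech_tp_instance oneof; an EPON instance would use the
	// InterAdapterTechProfileDownloadMessage_EponTpInstance wrapper instead.
	func wrapTpInstance(uniID uint32, tpPath string, inst *tp.TechProfileInstance) *ic.InterAdapterTechProfileDownloadMessage {
		return &ic.InterAdapterTechProfileDownloadMessage{
			UniId:          uniID,
			TpInstancePath: tpPath,
			TechTpInstance: &ic.InterAdapterTechProfileDownloadMessage_TpInstance{TpInstance: inst},
		}
	}
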
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/openolt/openolt.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/openolt/openolt.pb.go
index 02c4c27..2c8af68 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/openolt/openolt.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v4/go/openolt/openolt.pb.go
@@ -61,6 +61,36 @@
 // TrafficQueues from public import voltha_protos/tech_profile.proto
 type TrafficQueues = tech_profile.TrafficQueues
 
+// InstanceControl from public import voltha_protos/tech_profile.proto
+type InstanceControl = tech_profile.InstanceControl
+
+// QThresholds from public import voltha_protos/tech_profile.proto
+type QThresholds = tech_profile.QThresholds
+
+// GemPortAttributes from public import voltha_protos/tech_profile.proto
+type GemPortAttributes = tech_profile.GemPortAttributes
+
+// SchedulerAttributes from public import voltha_protos/tech_profile.proto
+type SchedulerAttributes = tech_profile.SchedulerAttributes
+
+// EPONQueueAttributes from public import voltha_protos/tech_profile.proto
+type EPONQueueAttributes = tech_profile.EPONQueueAttributes
+
+// TechProfile from public import voltha_protos/tech_profile.proto
+type TechProfile = tech_profile.TechProfile
+
+// EponTechProfile from public import voltha_protos/tech_profile.proto
+type EponTechProfile = tech_profile.EponTechProfile
+
+// TechProfileInstance from public import voltha_protos/tech_profile.proto
+type TechProfileInstance = tech_profile.TechProfileInstance
+
+// EponTechProfileInstance from public import voltha_protos/tech_profile.proto
+type EponTechProfileInstance = tech_profile.EponTechProfileInstance
+
+// ResourceInstance from public import voltha_protos/tech_profile.proto
+type ResourceInstance = tech_profile.ResourceInstance
+
 // Direction from public import voltha_protos/tech_profile.proto
 type Direction = tech_profile.Direction
 
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/tech_profile/tech_profile.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/tech_profile/tech_profile.pb.go
index d60ed83..fc70ea8 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/tech_profile/tech_profile.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v4/go/tech_profile/tech_profile.pb.go
@@ -893,6 +893,1033 @@
 	return 0
 }
 
+type InstanceControl struct {
+	Onu                  string   `protobuf:"bytes,1,opt,name=onu,proto3" json:"onu,omitempty"`
+	Uni                  string   `protobuf:"bytes,2,opt,name=uni,proto3" json:"uni,omitempty"`
+	MaxGemPayloadSize    string   `protobuf:"bytes,3,opt,name=max_gem_payload_size,json=maxGemPayloadSize,proto3" json:"max_gem_payload_size,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *InstanceControl) Reset()         { *m = InstanceControl{} }
+func (m *InstanceControl) String() string { return proto.CompactTextString(m) }
+func (*InstanceControl) ProtoMessage()    {}
+func (*InstanceControl) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{10}
+}
+
+func (m *InstanceControl) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_InstanceControl.Unmarshal(m, b)
+}
+func (m *InstanceControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_InstanceControl.Marshal(b, m, deterministic)
+}
+func (m *InstanceControl) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InstanceControl.Merge(m, src)
+}
+func (m *InstanceControl) XXX_Size() int {
+	return xxx_messageInfo_InstanceControl.Size(m)
+}
+func (m *InstanceControl) XXX_DiscardUnknown() {
+	xxx_messageInfo_InstanceControl.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InstanceControl proto.InternalMessageInfo
+
+func (m *InstanceControl) GetOnu() string {
+	if m != nil {
+		return m.Onu
+	}
+	return ""
+}
+
+func (m *InstanceControl) GetUni() string {
+	if m != nil {
+		return m.Uni
+	}
+	return ""
+}
+
+func (m *InstanceControl) GetMaxGemPayloadSize() string {
+	if m != nil {
+		return m.MaxGemPayloadSize
+	}
+	return ""
+}
+
+type QThresholds struct {
+	QThreshold1          uint32   `protobuf:"varint,1,opt,name=q_threshold1,json=qThreshold1,proto3" json:"q_threshold1,omitempty"`
+	QThreshold2          uint32   `protobuf:"varint,2,opt,name=q_threshold2,json=qThreshold2,proto3" json:"q_threshold2,omitempty"`
+	QThreshold3          uint32   `protobuf:"varint,3,opt,name=q_threshold3,json=qThreshold3,proto3" json:"q_threshold3,omitempty"`
+	QThreshold4          uint32   `protobuf:"varint,4,opt,name=q_threshold4,json=qThreshold4,proto3" json:"q_threshold4,omitempty"`
+	QThreshold5          uint32   `protobuf:"varint,5,opt,name=q_threshold5,json=qThreshold5,proto3" json:"q_threshold5,omitempty"`
+	QThreshold6          uint32   `protobuf:"varint,6,opt,name=q_threshold6,json=qThreshold6,proto3" json:"q_threshold6,omitempty"`
+	QThreshold7          uint32   `protobuf:"varint,7,opt,name=q_threshold7,json=qThreshold7,proto3" json:"q_threshold7,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *QThresholds) Reset()         { *m = QThresholds{} }
+func (m *QThresholds) String() string { return proto.CompactTextString(m) }
+func (*QThresholds) ProtoMessage()    {}
+func (*QThresholds) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{11}
+}
+
+func (m *QThresholds) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_QThresholds.Unmarshal(m, b)
+}
+func (m *QThresholds) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_QThresholds.Marshal(b, m, deterministic)
+}
+func (m *QThresholds) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_QThresholds.Merge(m, src)
+}
+func (m *QThresholds) XXX_Size() int {
+	return xxx_messageInfo_QThresholds.Size(m)
+}
+func (m *QThresholds) XXX_DiscardUnknown() {
+	xxx_messageInfo_QThresholds.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_QThresholds proto.InternalMessageInfo
+
+func (m *QThresholds) GetQThreshold1() uint32 {
+	if m != nil {
+		return m.QThreshold1
+	}
+	return 0
+}
+
+func (m *QThresholds) GetQThreshold2() uint32 {
+	if m != nil {
+		return m.QThreshold2
+	}
+	return 0
+}
+
+func (m *QThresholds) GetQThreshold3() uint32 {
+	if m != nil {
+		return m.QThreshold3
+	}
+	return 0
+}
+
+func (m *QThresholds) GetQThreshold4() uint32 {
+	if m != nil {
+		return m.QThreshold4
+	}
+	return 0
+}
+
+func (m *QThresholds) GetQThreshold5() uint32 {
+	if m != nil {
+		return m.QThreshold5
+	}
+	return 0
+}
+
+func (m *QThresholds) GetQThreshold6() uint32 {
+	if m != nil {
+		return m.QThreshold6
+	}
+	return 0
+}
+
+func (m *QThresholds) GetQThreshold7() uint32 {
+	if m != nil {
+		return m.QThreshold7
+	}
+	return 0
+}
+
+type GemPortAttributes struct {
+	GemportId                uint32            `protobuf:"fixed32,1,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
+	MaxQSize                 string            `protobuf:"bytes,2,opt,name=max_q_size,json=maxQSize,proto3" json:"max_q_size,omitempty"`
+	PbitMap                  string            `protobuf:"bytes,3,opt,name=pbit_map,json=pbitMap,proto3" json:"pbit_map,omitempty"`
+	AesEncryption            string            `protobuf:"bytes,4,opt,name=aes_encryption,json=aesEncryption,proto3" json:"aes_encryption,omitempty"`
+	SchedulingPolicy         SchedulingPolicy  `protobuf:"varint,5,opt,name=scheduling_policy,json=schedulingPolicy,proto3,enum=tech_profile.SchedulingPolicy" json:"scheduling_policy,omitempty"`
+	PriorityQ                uint32            `protobuf:"fixed32,6,opt,name=priority_q,json=priorityQ,proto3" json:"priority_q,omitempty"`
+	Weight                   uint32            `protobuf:"fixed32,7,opt,name=weight,proto3" json:"weight,omitempty"`
+	DiscardPolicy            DiscardPolicy     `protobuf:"varint,8,opt,name=discard_policy,json=discardPolicy,proto3,enum=tech_profile.DiscardPolicy" json:"discard_policy,omitempty"`
+	DiscardConfig            *RedDiscardConfig `protobuf:"bytes,9,opt,name=discard_config,json=discardConfig,proto3" json:"discard_config,omitempty"`
+	DiscardConfigV2          *DiscardConfig    `protobuf:"bytes,14,opt,name=discard_config_v2,json=discardConfigV2,proto3" json:"discard_config_v2,omitempty"`
+	IsMulticast              string            `protobuf:"bytes,10,opt,name=is_multicast,json=isMulticast,proto3" json:"is_multicast,omitempty"`
+	MulticastGemId           uint32            `protobuf:"fixed32,11,opt,name=multicast_gem_id,json=multicastGemId,proto3" json:"multicast_gem_id,omitempty"`
+	DynamicAccessControlList string            `protobuf:"bytes,12,opt,name=dynamic_access_control_list,json=dynamicAccessControlList,proto3" json:"dynamic_access_control_list,omitempty"`
+	StaticAccessControlList  string            `protobuf:"bytes,13,opt,name=static_access_control_list,json=staticAccessControlList,proto3" json:"static_access_control_list,omitempty"`
+	XXX_NoUnkeyedLiteral     struct{}          `json:"-"`
+	XXX_unrecognized         []byte            `json:"-"`
+	XXX_sizecache            int32             `json:"-"`
+}
+
+func (m *GemPortAttributes) Reset()         { *m = GemPortAttributes{} }
+func (m *GemPortAttributes) String() string { return proto.CompactTextString(m) }
+func (*GemPortAttributes) ProtoMessage()    {}
+func (*GemPortAttributes) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{12}
+}
+
+func (m *GemPortAttributes) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GemPortAttributes.Unmarshal(m, b)
+}
+func (m *GemPortAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GemPortAttributes.Marshal(b, m, deterministic)
+}
+func (m *GemPortAttributes) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GemPortAttributes.Merge(m, src)
+}
+func (m *GemPortAttributes) XXX_Size() int {
+	return xxx_messageInfo_GemPortAttributes.Size(m)
+}
+func (m *GemPortAttributes) XXX_DiscardUnknown() {
+	xxx_messageInfo_GemPortAttributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GemPortAttributes proto.InternalMessageInfo
+
+func (m *GemPortAttributes) GetGemportId() uint32 {
+	if m != nil {
+		return m.GemportId
+	}
+	return 0
+}
+
+func (m *GemPortAttributes) GetMaxQSize() string {
+	if m != nil {
+		return m.MaxQSize
+	}
+	return ""
+}
+
+func (m *GemPortAttributes) GetPbitMap() string {
+	if m != nil {
+		return m.PbitMap
+	}
+	return ""
+}
+
+func (m *GemPortAttributes) GetAesEncryption() string {
+	if m != nil {
+		return m.AesEncryption
+	}
+	return ""
+}
+
+func (m *GemPortAttributes) GetSchedulingPolicy() SchedulingPolicy {
+	if m != nil {
+		return m.SchedulingPolicy
+	}
+	return SchedulingPolicy_WRR
+}
+
+func (m *GemPortAttributes) GetPriorityQ() uint32 {
+	if m != nil {
+		return m.PriorityQ
+	}
+	return 0
+}
+
+func (m *GemPortAttributes) GetWeight() uint32 {
+	if m != nil {
+		return m.Weight
+	}
+	return 0
+}
+
+func (m *GemPortAttributes) GetDiscardPolicy() DiscardPolicy {
+	if m != nil {
+		return m.DiscardPolicy
+	}
+	return DiscardPolicy_TailDrop
+}
+
+func (m *GemPortAttributes) GetDiscardConfig() *RedDiscardConfig {
+	if m != nil {
+		return m.DiscardConfig
+	}
+	return nil
+}
+
+func (m *GemPortAttributes) GetDiscardConfigV2() *DiscardConfig {
+	if m != nil {
+		return m.DiscardConfigV2
+	}
+	return nil
+}
+
+func (m *GemPortAttributes) GetIsMulticast() string {
+	if m != nil {
+		return m.IsMulticast
+	}
+	return ""
+}
+
+func (m *GemPortAttributes) GetMulticastGemId() uint32 {
+	if m != nil {
+		return m.MulticastGemId
+	}
+	return 0
+}
+
+func (m *GemPortAttributes) GetDynamicAccessControlList() string {
+	if m != nil {
+		return m.DynamicAccessControlList
+	}
+	return ""
+}
+
+func (m *GemPortAttributes) GetStaticAccessControlList() string {
+	if m != nil {
+		return m.StaticAccessControlList
+	}
+	return ""
+}
+
+type SchedulerAttributes struct {
+	Direction            Direction        `protobuf:"varint,1,opt,name=direction,proto3,enum=tech_profile.Direction" json:"direction,omitempty"`
+	AllocId              uint32           `protobuf:"varint,2,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
+	AdditionalBw         AdditionalBW     `protobuf:"varint,3,opt,name=additional_bw,json=additionalBw,proto3,enum=tech_profile.AdditionalBW" json:"additional_bw,omitempty"`
+	Priority             uint32           `protobuf:"fixed32,4,opt,name=priority,proto3" json:"priority,omitempty"`
+	Weight               uint32           `protobuf:"fixed32,5,opt,name=weight,proto3" json:"weight,omitempty"`
+	QSchedPolicy         SchedulingPolicy `protobuf:"varint,6,opt,name=q_sched_policy,json=qSchedPolicy,proto3,enum=tech_profile.SchedulingPolicy" json:"q_sched_policy,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *SchedulerAttributes) Reset()         { *m = SchedulerAttributes{} }
+func (m *SchedulerAttributes) String() string { return proto.CompactTextString(m) }
+func (*SchedulerAttributes) ProtoMessage()    {}
+func (*SchedulerAttributes) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{13}
+}
+
+func (m *SchedulerAttributes) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SchedulerAttributes.Unmarshal(m, b)
+}
+func (m *SchedulerAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SchedulerAttributes.Marshal(b, m, deterministic)
+}
+func (m *SchedulerAttributes) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SchedulerAttributes.Merge(m, src)
+}
+func (m *SchedulerAttributes) XXX_Size() int {
+	return xxx_messageInfo_SchedulerAttributes.Size(m)
+}
+func (m *SchedulerAttributes) XXX_DiscardUnknown() {
+	xxx_messageInfo_SchedulerAttributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SchedulerAttributes proto.InternalMessageInfo
+
+func (m *SchedulerAttributes) GetDirection() Direction {
+	if m != nil {
+		return m.Direction
+	}
+	return Direction_UPSTREAM
+}
+
+func (m *SchedulerAttributes) GetAllocId() uint32 {
+	if m != nil {
+		return m.AllocId
+	}
+	return 0
+}
+
+func (m *SchedulerAttributes) GetAdditionalBw() AdditionalBW {
+	if m != nil {
+		return m.AdditionalBw
+	}
+	return AdditionalBW_AdditionalBW_None
+}
+
+func (m *SchedulerAttributes) GetPriority() uint32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *SchedulerAttributes) GetWeight() uint32 {
+	if m != nil {
+		return m.Weight
+	}
+	return 0
+}
+
+func (m *SchedulerAttributes) GetQSchedPolicy() SchedulingPolicy {
+	if m != nil {
+		return m.QSchedPolicy
+	}
+	return SchedulingPolicy_WRR
+}
+
+type EPONQueueAttributes struct {
+	MaxQSize                  string            `protobuf:"bytes,1,opt,name=max_q_size,json=maxQSize,proto3" json:"max_q_size,omitempty"`
+	PbitMap                   string            `protobuf:"bytes,2,opt,name=pbit_map,json=pbitMap,proto3" json:"pbit_map,omitempty"`
+	GemportId                 uint32            `protobuf:"varint,3,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
+	AesEncryption             string            `protobuf:"bytes,4,opt,name=aes_encryption,json=aesEncryption,proto3" json:"aes_encryption,omitempty"`
+	TrafficType               string            `protobuf:"bytes,5,opt,name=traffic_type,json=trafficType,proto3" json:"traffic_type,omitempty"`
+	UnsolicitedGrantSize      uint32            `protobuf:"varint,6,opt,name=unsolicited_grant_size,json=unsolicitedGrantSize,proto3" json:"unsolicited_grant_size,omitempty"`
+	NominalInterval           uint32            `protobuf:"varint,7,opt,name=nominal_interval,json=nominalInterval,proto3" json:"nominal_interval,omitempty"`
+	ToleratedPollJitter       uint32            `protobuf:"varint,8,opt,name=tolerated_poll_jitter,json=toleratedPollJitter,proto3" json:"tolerated_poll_jitter,omitempty"`
+	RequestTransmissionPolicy uint32            `protobuf:"varint,9,opt,name=request_transmission_policy,json=requestTransmissionPolicy,proto3" json:"request_transmission_policy,omitempty"`
+	NumQSets                  uint32            `protobuf:"varint,10,opt,name=num_q_sets,json=numQSets,proto3" json:"num_q_sets,omitempty"`
+	QThresholds               *QThresholds      `protobuf:"bytes,11,opt,name=q_thresholds,json=qThresholds,proto3" json:"q_thresholds,omitempty"`
+	SchedulingPolicy          SchedulingPolicy  `protobuf:"varint,12,opt,name=scheduling_policy,json=schedulingPolicy,proto3,enum=tech_profile.SchedulingPolicy" json:"scheduling_policy,omitempty"`
+	PriorityQ                 uint32            `protobuf:"varint,13,opt,name=priority_q,json=priorityQ,proto3" json:"priority_q,omitempty"`
+	Weight                    uint32            `protobuf:"varint,14,opt,name=weight,proto3" json:"weight,omitempty"`
+	DiscardPolicy             DiscardPolicy     `protobuf:"varint,15,opt,name=discard_policy,json=discardPolicy,proto3,enum=tech_profile.DiscardPolicy" json:"discard_policy,omitempty"`
+	DiscardConfig             *RedDiscardConfig `protobuf:"bytes,16,opt,name=discard_config,json=discardConfig,proto3" json:"discard_config,omitempty"`
+	DiscardConfigV2           *DiscardConfig    `protobuf:"bytes,17,opt,name=discard_config_v2,json=discardConfigV2,proto3" json:"discard_config_v2,omitempty"`
+	XXX_NoUnkeyedLiteral      struct{}          `json:"-"`
+	XXX_unrecognized          []byte            `json:"-"`
+	XXX_sizecache             int32             `json:"-"`
+}
+
+func (m *EPONQueueAttributes) Reset()         { *m = EPONQueueAttributes{} }
+func (m *EPONQueueAttributes) String() string { return proto.CompactTextString(m) }
+func (*EPONQueueAttributes) ProtoMessage()    {}
+func (*EPONQueueAttributes) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{14}
+}
+
+func (m *EPONQueueAttributes) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EPONQueueAttributes.Unmarshal(m, b)
+}
+func (m *EPONQueueAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EPONQueueAttributes.Marshal(b, m, deterministic)
+}
+func (m *EPONQueueAttributes) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EPONQueueAttributes.Merge(m, src)
+}
+func (m *EPONQueueAttributes) XXX_Size() int {
+	return xxx_messageInfo_EPONQueueAttributes.Size(m)
+}
+func (m *EPONQueueAttributes) XXX_DiscardUnknown() {
+	xxx_messageInfo_EPONQueueAttributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EPONQueueAttributes proto.InternalMessageInfo
+
+func (m *EPONQueueAttributes) GetMaxQSize() string {
+	if m != nil {
+		return m.MaxQSize
+	}
+	return ""
+}
+
+func (m *EPONQueueAttributes) GetPbitMap() string {
+	if m != nil {
+		return m.PbitMap
+	}
+	return ""
+}
+
+func (m *EPONQueueAttributes) GetGemportId() uint32 {
+	if m != nil {
+		return m.GemportId
+	}
+	return 0
+}
+
+func (m *EPONQueueAttributes) GetAesEncryption() string {
+	if m != nil {
+		return m.AesEncryption
+	}
+	return ""
+}
+
+func (m *EPONQueueAttributes) GetTrafficType() string {
+	if m != nil {
+		return m.TrafficType
+	}
+	return ""
+}
+
+func (m *EPONQueueAttributes) GetUnsolicitedGrantSize() uint32 {
+	if m != nil {
+		return m.UnsolicitedGrantSize
+	}
+	return 0
+}
+
+func (m *EPONQueueAttributes) GetNominalInterval() uint32 {
+	if m != nil {
+		return m.NominalInterval
+	}
+	return 0
+}
+
+func (m *EPONQueueAttributes) GetToleratedPollJitter() uint32 {
+	if m != nil {
+		return m.ToleratedPollJitter
+	}
+	return 0
+}
+
+func (m *EPONQueueAttributes) GetRequestTransmissionPolicy() uint32 {
+	if m != nil {
+		return m.RequestTransmissionPolicy
+	}
+	return 0
+}
+
+func (m *EPONQueueAttributes) GetNumQSets() uint32 {
+	if m != nil {
+		return m.NumQSets
+	}
+	return 0
+}
+
+func (m *EPONQueueAttributes) GetQThresholds() *QThresholds {
+	if m != nil {
+		return m.QThresholds
+	}
+	return nil
+}
+
+func (m *EPONQueueAttributes) GetSchedulingPolicy() SchedulingPolicy {
+	if m != nil {
+		return m.SchedulingPolicy
+	}
+	return SchedulingPolicy_WRR
+}
+
+func (m *EPONQueueAttributes) GetPriorityQ() uint32 {
+	if m != nil {
+		return m.PriorityQ
+	}
+	return 0
+}
+
+func (m *EPONQueueAttributes) GetWeight() uint32 {
+	if m != nil {
+		return m.Weight
+	}
+	return 0
+}
+
+func (m *EPONQueueAttributes) GetDiscardPolicy() DiscardPolicy {
+	if m != nil {
+		return m.DiscardPolicy
+	}
+	return DiscardPolicy_TailDrop
+}
+
+func (m *EPONQueueAttributes) GetDiscardConfig() *RedDiscardConfig {
+	if m != nil {
+		return m.DiscardConfig
+	}
+	return nil
+}
+
+func (m *EPONQueueAttributes) GetDiscardConfigV2() *DiscardConfig {
+	if m != nil {
+		return m.DiscardConfigV2
+	}
+	return nil
+}
+
+// TechProfile definition (relevant for GPON, XGPON and XGS-PON technologies)
+type TechProfile struct {
+	Name                           string               `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Version                        uint32               `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+	ProfileType                    string               `protobuf:"bytes,3,opt,name=profile_type,json=profileType,proto3" json:"profile_type,omitempty"`
+	NumGemPorts                    uint32               `protobuf:"varint,4,opt,name=num_gem_ports,json=numGemPorts,proto3" json:"num_gem_ports,omitempty"`
+	InstanceControl                *InstanceControl     `protobuf:"bytes,5,opt,name=instance_control,json=instanceControl,proto3" json:"instance_control,omitempty"`
+	UsScheduler                    *SchedulerAttributes `protobuf:"bytes,6,opt,name=us_scheduler,json=usScheduler,proto3" json:"us_scheduler,omitempty"`
+	DsScheduler                    *SchedulerAttributes `protobuf:"bytes,7,opt,name=ds_scheduler,json=dsScheduler,proto3" json:"ds_scheduler,omitempty"`
+	UpstreamGemPortAttributeList   []*GemPortAttributes `protobuf:"bytes,8,rep,name=upstream_gem_port_attribute_list,json=upstreamGemPortAttributeList,proto3" json:"upstream_gem_port_attribute_list,omitempty"`
+	DownstreamGemPortAttributeList []*GemPortAttributes `protobuf:"bytes,9,rep,name=downstream_gem_port_attribute_list,json=downstreamGemPortAttributeList,proto3" json:"downstream_gem_port_attribute_list,omitempty"`
+	XXX_NoUnkeyedLiteral           struct{}             `json:"-"`
+	XXX_unrecognized               []byte               `json:"-"`
+	XXX_sizecache                  int32                `json:"-"`
+}
+
+func (m *TechProfile) Reset()         { *m = TechProfile{} }
+func (m *TechProfile) String() string { return proto.CompactTextString(m) }
+func (*TechProfile) ProtoMessage()    {}
+func (*TechProfile) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{15}
+}
+
+func (m *TechProfile) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TechProfile.Unmarshal(m, b)
+}
+func (m *TechProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TechProfile.Marshal(b, m, deterministic)
+}
+func (m *TechProfile) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TechProfile.Merge(m, src)
+}
+func (m *TechProfile) XXX_Size() int {
+	return xxx_messageInfo_TechProfile.Size(m)
+}
+func (m *TechProfile) XXX_DiscardUnknown() {
+	xxx_messageInfo_TechProfile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TechProfile proto.InternalMessageInfo
+
+func (m *TechProfile) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *TechProfile) GetVersion() uint32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *TechProfile) GetProfileType() string {
+	if m != nil {
+		return m.ProfileType
+	}
+	return ""
+}
+
+func (m *TechProfile) GetNumGemPorts() uint32 {
+	if m != nil {
+		return m.NumGemPorts
+	}
+	return 0
+}
+
+func (m *TechProfile) GetInstanceControl() *InstanceControl {
+	if m != nil {
+		return m.InstanceControl
+	}
+	return nil
+}
+
+func (m *TechProfile) GetUsScheduler() *SchedulerAttributes {
+	if m != nil {
+		return m.UsScheduler
+	}
+	return nil
+}
+
+func (m *TechProfile) GetDsScheduler() *SchedulerAttributes {
+	if m != nil {
+		return m.DsScheduler
+	}
+	return nil
+}
+
+func (m *TechProfile) GetUpstreamGemPortAttributeList() []*GemPortAttributes {
+	if m != nil {
+		return m.UpstreamGemPortAttributeList
+	}
+	return nil
+}
+
+func (m *TechProfile) GetDownstreamGemPortAttributeList() []*GemPortAttributes {
+	if m != nil {
+		return m.DownstreamGemPortAttributeList
+	}
+	return nil
+}
+
+// EPON TechProfile definition
+type EponTechProfile struct {
+	Name                         string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Version                      uint32                 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+	ProfileType                  string                 `protobuf:"bytes,3,opt,name=profile_type,json=profileType,proto3" json:"profile_type,omitempty"`
+	NumGemPorts                  uint32                 `protobuf:"varint,4,opt,name=num_gem_ports,json=numGemPorts,proto3" json:"num_gem_ports,omitempty"`
+	InstanceControl              *InstanceControl       `protobuf:"bytes,5,opt,name=instance_control,json=instanceControl,proto3" json:"instance_control,omitempty"`
+	PackageType                  string                 `protobuf:"bytes,6,opt,name=package_type,json=packageType,proto3" json:"package_type,omitempty"`
+	UpstreamQueueAttributeList   []*EPONQueueAttributes `protobuf:"bytes,7,rep,name=upstream_queue_attribute_list,json=upstreamQueueAttributeList,proto3" json:"upstream_queue_attribute_list,omitempty"`
+	DownstreamQueueAttributeList []*EPONQueueAttributes `protobuf:"bytes,8,rep,name=downstream_queue_attribute_list,json=downstreamQueueAttributeList,proto3" json:"downstream_queue_attribute_list,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	XXX_unrecognized             []byte                 `json:"-"`
+	XXX_sizecache                int32                  `json:"-"`
+}
+
+func (m *EponTechProfile) Reset()         { *m = EponTechProfile{} }
+func (m *EponTechProfile) String() string { return proto.CompactTextString(m) }
+func (*EponTechProfile) ProtoMessage()    {}
+func (*EponTechProfile) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{16}
+}
+
+func (m *EponTechProfile) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EponTechProfile.Unmarshal(m, b)
+}
+func (m *EponTechProfile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EponTechProfile.Marshal(b, m, deterministic)
+}
+func (m *EponTechProfile) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EponTechProfile.Merge(m, src)
+}
+func (m *EponTechProfile) XXX_Size() int {
+	return xxx_messageInfo_EponTechProfile.Size(m)
+}
+func (m *EponTechProfile) XXX_DiscardUnknown() {
+	xxx_messageInfo_EponTechProfile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EponTechProfile proto.InternalMessageInfo
+
+func (m *EponTechProfile) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *EponTechProfile) GetVersion() uint32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *EponTechProfile) GetProfileType() string {
+	if m != nil {
+		return m.ProfileType
+	}
+	return ""
+}
+
+func (m *EponTechProfile) GetNumGemPorts() uint32 {
+	if m != nil {
+		return m.NumGemPorts
+	}
+	return 0
+}
+
+func (m *EponTechProfile) GetInstanceControl() *InstanceControl {
+	if m != nil {
+		return m.InstanceControl
+	}
+	return nil
+}
+
+func (m *EponTechProfile) GetPackageType() string {
+	if m != nil {
+		return m.PackageType
+	}
+	return ""
+}
+
+func (m *EponTechProfile) GetUpstreamQueueAttributeList() []*EPONQueueAttributes {
+	if m != nil {
+		return m.UpstreamQueueAttributeList
+	}
+	return nil
+}
+
+func (m *EponTechProfile) GetDownstreamQueueAttributeList() []*EPONQueueAttributes {
+	if m != nil {
+		return m.DownstreamQueueAttributeList
+	}
+	return nil
+}
+
+// TechProfile Instance definition (relevant for GPON, XGPON and XGS-PON technologies)
+type TechProfileInstance struct {
+	Name                           string               `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Version                        uint32               `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+	SubscriberIdentifier           string               `protobuf:"bytes,3,opt,name=subscriber_identifier,json=subscriberIdentifier,proto3" json:"subscriber_identifier,omitempty"`
+	ProfileType                    string               `protobuf:"bytes,4,opt,name=profile_type,json=profileType,proto3" json:"profile_type,omitempty"`
+	NumGemPorts                    uint32               `protobuf:"varint,5,opt,name=num_gem_ports,json=numGemPorts,proto3" json:"num_gem_ports,omitempty"`
+	InstanceControl                *InstanceControl     `protobuf:"bytes,6,opt,name=instance_control,json=instanceControl,proto3" json:"instance_control,omitempty"`
+	UsScheduler                    *SchedulerAttributes `protobuf:"bytes,7,opt,name=us_scheduler,json=usScheduler,proto3" json:"us_scheduler,omitempty"`
+	DsScheduler                    *SchedulerAttributes `protobuf:"bytes,8,opt,name=ds_scheduler,json=dsScheduler,proto3" json:"ds_scheduler,omitempty"`
+	UpstreamGemPortAttributeList   []*GemPortAttributes `protobuf:"bytes,9,rep,name=upstream_gem_port_attribute_list,json=upstreamGemPortAttributeList,proto3" json:"upstream_gem_port_attribute_list,omitempty"`
+	DownstreamGemPortAttributeList []*GemPortAttributes `protobuf:"bytes,10,rep,name=downstream_gem_port_attribute_list,json=downstreamGemPortAttributeList,proto3" json:"downstream_gem_port_attribute_list,omitempty"`
+	XXX_NoUnkeyedLiteral           struct{}             `json:"-"`
+	XXX_unrecognized               []byte               `json:"-"`
+	XXX_sizecache                  int32                `json:"-"`
+}
+
+func (m *TechProfileInstance) Reset()         { *m = TechProfileInstance{} }
+func (m *TechProfileInstance) String() string { return proto.CompactTextString(m) }
+func (*TechProfileInstance) ProtoMessage()    {}
+func (*TechProfileInstance) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{17}
+}
+
+func (m *TechProfileInstance) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TechProfileInstance.Unmarshal(m, b)
+}
+func (m *TechProfileInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TechProfileInstance.Marshal(b, m, deterministic)
+}
+func (m *TechProfileInstance) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TechProfileInstance.Merge(m, src)
+}
+func (m *TechProfileInstance) XXX_Size() int {
+	return xxx_messageInfo_TechProfileInstance.Size(m)
+}
+func (m *TechProfileInstance) XXX_DiscardUnknown() {
+	xxx_messageInfo_TechProfileInstance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TechProfileInstance proto.InternalMessageInfo
+
+func (m *TechProfileInstance) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *TechProfileInstance) GetVersion() uint32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *TechProfileInstance) GetSubscriberIdentifier() string {
+	if m != nil {
+		return m.SubscriberIdentifier
+	}
+	return ""
+}
+
+func (m *TechProfileInstance) GetProfileType() string {
+	if m != nil {
+		return m.ProfileType
+	}
+	return ""
+}
+
+func (m *TechProfileInstance) GetNumGemPorts() uint32 {
+	if m != nil {
+		return m.NumGemPorts
+	}
+	return 0
+}
+
+func (m *TechProfileInstance) GetInstanceControl() *InstanceControl {
+	if m != nil {
+		return m.InstanceControl
+	}
+	return nil
+}
+
+func (m *TechProfileInstance) GetUsScheduler() *SchedulerAttributes {
+	if m != nil {
+		return m.UsScheduler
+	}
+	return nil
+}
+
+func (m *TechProfileInstance) GetDsScheduler() *SchedulerAttributes {
+	if m != nil {
+		return m.DsScheduler
+	}
+	return nil
+}
+
+func (m *TechProfileInstance) GetUpstreamGemPortAttributeList() []*GemPortAttributes {
+	if m != nil {
+		return m.UpstreamGemPortAttributeList
+	}
+	return nil
+}
+
+func (m *TechProfileInstance) GetDownstreamGemPortAttributeList() []*GemPortAttributes {
+	if m != nil {
+		return m.DownstreamGemPortAttributeList
+	}
+	return nil
+}
+
+// EPON TechProfile Instance definition.
+type EponTechProfileInstance struct {
+	Name                         string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Version                      uint32                 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
+	SubscriberIdentifier         string                 `protobuf:"bytes,3,opt,name=subscriber_identifier,json=subscriberIdentifier,proto3" json:"subscriber_identifier,omitempty"`
+	ProfileType                  string                 `protobuf:"bytes,4,opt,name=profile_type,json=profileType,proto3" json:"profile_type,omitempty"`
+	NumGemPorts                  uint32                 `protobuf:"varint,5,opt,name=num_gem_ports,json=numGemPorts,proto3" json:"num_gem_ports,omitempty"`
+	AllocId                      uint32                 `protobuf:"varint,6,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
+	InstanceControl              *InstanceControl       `protobuf:"bytes,7,opt,name=instance_control,json=instanceControl,proto3" json:"instance_control,omitempty"`
+	PackageType                  string                 `protobuf:"bytes,8,opt,name=package_type,json=packageType,proto3" json:"package_type,omitempty"`
+	UpstreamQueueAttributeList   []*EPONQueueAttributes `protobuf:"bytes,9,rep,name=upstream_queue_attribute_list,json=upstreamQueueAttributeList,proto3" json:"upstream_queue_attribute_list,omitempty"`
+	DownstreamQueueAttributeList []*EPONQueueAttributes `protobuf:"bytes,10,rep,name=downstream_queue_attribute_list,json=downstreamQueueAttributeList,proto3" json:"downstream_queue_attribute_list,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	XXX_unrecognized             []byte                 `json:"-"`
+	XXX_sizecache                int32                  `json:"-"`
+}
+
+func (m *EponTechProfileInstance) Reset()         { *m = EponTechProfileInstance{} }
+func (m *EponTechProfileInstance) String() string { return proto.CompactTextString(m) }
+func (*EponTechProfileInstance) ProtoMessage()    {}
+func (*EponTechProfileInstance) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{18}
+}
+
+func (m *EponTechProfileInstance) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EponTechProfileInstance.Unmarshal(m, b)
+}
+func (m *EponTechProfileInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EponTechProfileInstance.Marshal(b, m, deterministic)
+}
+func (m *EponTechProfileInstance) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EponTechProfileInstance.Merge(m, src)
+}
+func (m *EponTechProfileInstance) XXX_Size() int {
+	return xxx_messageInfo_EponTechProfileInstance.Size(m)
+}
+func (m *EponTechProfileInstance) XXX_DiscardUnknown() {
+	xxx_messageInfo_EponTechProfileInstance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EponTechProfileInstance proto.InternalMessageInfo
+
+func (m *EponTechProfileInstance) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *EponTechProfileInstance) GetVersion() uint32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *EponTechProfileInstance) GetSubscriberIdentifier() string {
+	if m != nil {
+		return m.SubscriberIdentifier
+	}
+	return ""
+}
+
+func (m *EponTechProfileInstance) GetProfileType() string {
+	if m != nil {
+		return m.ProfileType
+	}
+	return ""
+}
+
+func (m *EponTechProfileInstance) GetNumGemPorts() uint32 {
+	if m != nil {
+		return m.NumGemPorts
+	}
+	return 0
+}
+
+func (m *EponTechProfileInstance) GetAllocId() uint32 {
+	if m != nil {
+		return m.AllocId
+	}
+	return 0
+}
+
+func (m *EponTechProfileInstance) GetInstanceControl() *InstanceControl {
+	if m != nil {
+		return m.InstanceControl
+	}
+	return nil
+}
+
+func (m *EponTechProfileInstance) GetPackageType() string {
+	if m != nil {
+		return m.PackageType
+	}
+	return ""
+}
+
+func (m *EponTechProfileInstance) GetUpstreamQueueAttributeList() []*EPONQueueAttributes {
+	if m != nil {
+		return m.UpstreamQueueAttributeList
+	}
+	return nil
+}
+
+func (m *EponTechProfileInstance) GetDownstreamQueueAttributeList() []*EPONQueueAttributes {
+	if m != nil {
+		return m.DownstreamQueueAttributeList
+	}
+	return nil
+}
+
+// Resource Instance definition
+type ResourceInstance struct {
+	TpId                 uint32   `protobuf:"varint,1,opt,name=tp_id,json=tpId,proto3" json:"tp_id,omitempty"`
+	ProfileType          string   `protobuf:"bytes,2,opt,name=profile_type,json=profileType,proto3" json:"profile_type,omitempty"`
+	SubscriberIdentifier string   `protobuf:"bytes,3,opt,name=subscriber_identifier,json=subscriberIdentifier,proto3" json:"subscriber_identifier,omitempty"`
+	AllocId              uint32   `protobuf:"varint,4,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
+	GemportIds           []uint32 `protobuf:"varint,5,rep,packed,name=gemport_ids,json=gemportIds,proto3" json:"gemport_ids,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ResourceInstance) Reset()         { *m = ResourceInstance{} }
+func (m *ResourceInstance) String() string { return proto.CompactTextString(m) }
+func (*ResourceInstance) ProtoMessage()    {}
+func (*ResourceInstance) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d019a68bffe14cae, []int{19}
+}
+
+func (m *ResourceInstance) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ResourceInstance.Unmarshal(m, b)
+}
+func (m *ResourceInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ResourceInstance.Marshal(b, m, deterministic)
+}
+func (m *ResourceInstance) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResourceInstance.Merge(m, src)
+}
+func (m *ResourceInstance) XXX_Size() int {
+	return xxx_messageInfo_ResourceInstance.Size(m)
+}
+func (m *ResourceInstance) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResourceInstance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResourceInstance proto.InternalMessageInfo
+
+func (m *ResourceInstance) GetTpId() uint32 {
+	if m != nil {
+		return m.TpId
+	}
+	return 0
+}
+
+func (m *ResourceInstance) GetProfileType() string {
+	if m != nil {
+		return m.ProfileType
+	}
+	return ""
+}
+
+func (m *ResourceInstance) GetSubscriberIdentifier() string {
+	if m != nil {
+		return m.SubscriberIdentifier
+	}
+	return ""
+}
+
+func (m *ResourceInstance) GetAllocId() uint32 {
+	if m != nil {
+		return m.AllocId
+	}
+	return 0
+}
+
+func (m *ResourceInstance) GetGemportIds() []uint32 {
+	if m != nil {
+		return m.GemportIds
+	}
+	return nil
+}
+
 func init() {
 	proto.RegisterEnum("tech_profile.Direction", Direction_name, Direction_value)
 	proto.RegisterEnum("tech_profile.SchedulingPolicy", SchedulingPolicy_name, SchedulingPolicy_value)
@@ -909,82 +1936,154 @@
 	proto.RegisterType((*DiscardConfig)(nil), "tech_profile.DiscardConfig")
 	proto.RegisterType((*TrafficQueue)(nil), "tech_profile.TrafficQueue")
 	proto.RegisterType((*TrafficQueues)(nil), "tech_profile.TrafficQueues")
+	proto.RegisterType((*InstanceControl)(nil), "tech_profile.InstanceControl")
+	proto.RegisterType((*QThresholds)(nil), "tech_profile.QThresholds")
+	proto.RegisterType((*GemPortAttributes)(nil), "tech_profile.GemPortAttributes")
+	proto.RegisterType((*SchedulerAttributes)(nil), "tech_profile.SchedulerAttributes")
+	proto.RegisterType((*EPONQueueAttributes)(nil), "tech_profile.EPONQueueAttributes")
+	proto.RegisterType((*TechProfile)(nil), "tech_profile.TechProfile")
+	proto.RegisterType((*EponTechProfile)(nil), "tech_profile.EponTechProfile")
+	proto.RegisterType((*TechProfileInstance)(nil), "tech_profile.TechProfileInstance")
+	proto.RegisterType((*EponTechProfileInstance)(nil), "tech_profile.EponTechProfileInstance")
+	proto.RegisterType((*ResourceInstance)(nil), "tech_profile.ResourceInstance")
 }
 
 func init() { proto.RegisterFile("voltha_protos/tech_profile.proto", fileDescriptor_d019a68bffe14cae) }
 
 var fileDescriptor_d019a68bffe14cae = []byte{
-	// 1139 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0x1b, 0x45,
-	0x14, 0xf6, 0xda, 0x8d, 0x7f, 0x4e, 0x6c, 0x67, 0x33, 0x25, 0xc4, 0xa4, 0x0d, 0x18, 0x97, 0xd2,
-	0xc8, 0x88, 0x18, 0x85, 0xd0, 0x9b, 0x22, 0x55, 0x76, 0x13, 0x29, 0x2b, 0xd1, 0x34, 0xdd, 0x04,
-	0x19, 0x71, 0xc1, 0x6a, 0xbd, 0x33, 0x5e, 0x8f, 0xb4, 0x9e, 0x59, 0x66, 0xc7, 0x75, 0xd2, 0x2b,
-	0x6e, 0x78, 0x0b, 0xb8, 0xe4, 0x09, 0xb8, 0x41, 0x3c, 0x0a, 0x4f, 0xc0, 0x63, 0xa0, 0x99, 0xdd,
-	0xb5, 0xbd, 0xb6, 0x49, 0xa1, 0x82, 0xbb, 0x39, 0xdf, 0x7e, 0xe7, 0xcc, 0xf9, 0x9f, 0x85, 0xe6,
-	0x2b, 0x1e, 0xc8, 0x91, 0xeb, 0x84, 0x82, 0x4b, 0x1e, 0x75, 0x24, 0xf1, 0x46, 0xea, 0x3c, 0xa4,
-	0x01, 0x39, 0xd4, 0x18, 0xaa, 0x2e, 0x62, 0x7b, 0xf7, 0x7d, 0xce, 0xfd, 0x80, 0x74, 0xdc, 0x90,
-	0x76, 0x5c, 0xc6, 0xb8, 0x74, 0x25, 0xe5, 0x2c, 0x8a, 0xb9, 0xad, 0x1f, 0xf2, 0xb0, 0x75, 0xe9,
-	0x8d, 0x08, 0x9e, 0x04, 0x44, 0x3c, 0xe3, 0x6c, 0x48, 0x7d, 0xf4, 0x05, 0x54, 0x30, 0x15, 0xc4,
-	0x53, 0xbc, 0x86, 0xd1, 0x34, 0x0e, 0xea, 0x47, 0xbb, 0x87, 0x99, 0x7b, 0x4e, 0xd2, 0xcf, 0xf6,
-	0x9c, 0x89, 0x9e, 0x42, 0xcd, 0xc5, 0x98, 0xaa, 0xb3, 0x1b, 0x38, 0x83, 0x69, 0x23, 0xaf, 0x55,
-	0xf7, 0xb2, 0xaa, 0xdd, 0x19, 0xa5, 0xd7, 0xb7, 0xab, 0x73, 0x85, 0xde, 0x14, 0xed, 0x41, 0x39,
-	0x14, 0x94, 0x0b, 0x2a, 0x6f, 0x1a, 0x85, 0xa6, 0x71, 0x50, 0xb2, 0x67, 0x32, 0x7a, 0x17, 0x8a,
-	0x53, 0x42, 0xfd, 0x91, 0x6c, 0xdc, 0xd1, 0x5f, 0x12, 0x09, 0x75, 0xa1, 0x1a, 0x29, 0xf7, 0x9d,
-	0x90, 0x07, 0xd4, 0xbb, 0x69, 0x6c, 0xe8, 0x3b, 0xdf, 0xcf, 0xde, 0x99, 0x04, 0x48, 0x99, 0x7f,
-	0xa1, 0x59, 0xf6, 0xa6, 0xd6, 0x89, 0x85, 0xd6, 0x6f, 0x06, 0xa0, 0x2b, 0xe1, 0x0e, 0x87, 0xd4,
-	0xbb, 0x1c, 0xb9, 0x21, 0x65, 0xbe, 0xc5, 0x86, 0x1c, 0x99, 0x50, 0xf0, 0xa8, 0xd0, 0xf1, 0x97,
-	0x6c, 0x75, 0xd4, 0xc8, 0x20, 0xd2, 0x61, 0x29, 0x64, 0x10, 0x29, 0x24, 0xa4, 0x22, 0x71, 0x56,
-	0x1d, 0x35, 0x32, 0x88, 0x12, 0x27, 0xd5, 0x51, 0x21, 0x3e, 0x15, 0xda, 0xb1, 0x92, 0xad, 0x8e,
-	0xe8, 0x0c, 0xc0, 0xc5, 0xd8, 0x19, 0x4c, 0x1d, 0xca, 0x70, 0xa3, 0xa8, 0x3d, 0x6e, 0x67, 0x3d,
-	0xb6, 0xd8, 0x90, 0x08, 0x41, 0x70, 0x9a, 0xad, 0x5e, 0xdf, 0x62, 0x98, 0x7a, 0xba, 0x74, 0x76,
-	0xd9, 0xc5, 0xb8, 0x37, 0xb5, 0x18, 0x6e, 0xfd, 0x9c, 0x07, 0x33, 0x75, 0x3d, 0x2d, 0xe2, 0xdb,
-	0x96, 0xef, 0x3d, 0x28, 0xbb, 0x41, 0xc0, 0x3d, 0x87, 0xe2, 0x24, 0xc4, 0x92, 0x96, 0x2d, 0x8c,
-	0x9e, 0x40, 0x25, 0x4a, 0xcd, 0xeb, 0x60, 0x37, 0x8f, 0xf6, 0xd7, 0x66, 0x38, 0x6d, 0x21, 0x7b,
-	0xce, 0x47, 0x36, 0xbc, 0x23, 0x63, 0x17, 0x9d, 0x28, 0x4e, 0xaf, 0x43, 0xd9, 0x90, 0xeb, 0x14,
-	0x6d, 0x1e, 0x35, 0xb3, 0x76, 0x56, 0xeb, 0x60, 0x23, 0xb9, 0x5a, 0x9b, 0x8f, 0x61, 0x6b, 0x51,
-	0x4d, 0xb9, 0x1c, 0xe7, 0xb7, 0xa6, 0xe0, 0x8b, 0x18, 0xb5, 0x70, 0xeb, 0x77, 0x03, 0xb6, 0x97,
-	0xf3, 0x13, 0xa1, 0x5d, 0x28, 0x51, 0x26, 0x87, 0x4a, 0x2b, 0xae, 0x6e, 0x51, 0x89, 0x16, 0x46,
-	0x3b, 0x50, 0xe4, 0x6c, 0x32, 0x4f, 0xc0, 0x06, 0x67, 0x93, 0x18, 0x9e, 0x30, 0xaa, 0xe0, 0xb8,
-	0xac, 0x1b, 0x13, 0x46, 0x2d, 0xac, 0xcc, 0x84, 0x5c, 0x48, 0x87, 0xf1, 0xe4, 0xf2, 0xa2, 0x12,
-	0xcf, 0x39, 0x3a, 0x85, 0xfa, 0x2c, 0x62, 0x75, 0x6b, 0xd4, 0x28, 0x34, 0x0b, 0x07, 0x9b, 0xcb,
-	0x5d, 0xb9, 0xec, 0x98, 0x5d, 0x93, 0x0b, 0x48, 0xd4, 0x7a, 0x0c, 0x3b, 0x57, 0x2e, 0x0d, 0x4e,
-	0x04, 0x0f, 0x4f, 0x68, 0xe4, 0xb9, 0x02, 0x27, 0xf3, 0xb9, 0x0f, 0xf0, 0xfd, 0x84, 0x4c, 0x88,
-	0x13, 0xd1, 0xd7, 0x24, 0x09, 0xa1, 0xa2, 0x91, 0x4b, 0xfa, 0x9a, 0xb4, 0x7e, 0x34, 0xc0, 0xb4,
-	0x09, 0xce, 0xea, 0x3c, 0x80, 0xda, 0x98, 0x32, 0x47, 0x8e, 0x04, 0x89, 0x46, 0x3c, 0x48, 0x23,
-	0xaf, 0x8e, 0x29, 0xbb, 0x4a, 0x31, 0x4d, 0x72, 0xaf, 0x17, 0x48, 0xf9, 0x84, 0xe4, 0x5e, 0xcf,
-	0x49, 0x8f, 0x60, 0x4b, 0x91, 0x42, 0xc1, 0x07, 0xee, 0x80, 0x06, 0xf3, 0x61, 0xad, 0x8f, 0xdd,
-	0xeb, 0x8b, 0x39, 0xda, 0xfa, 0xd5, 0x80, 0xed, 0xfe, 0x8a, 0x23, 0xc7, 0xb0, 0xe1, 0x0b, 0x42,
-	0xe2, 0xce, 0x5c, 0xc9, 0xc9, 0x32, 0xdd, 0x8e, 0xc9, 0xe8, 0x31, 0x14, 0x6f, 0x48, 0x10, 0xf0,
-	0x78, 0xa9, 0xbc, 0x59, 0x2d, 0x61, 0xa3, 0xcf, 0xa0, 0x20, 0x08, 0x4e, 0x7a, 0xf6, 0x4d, 0x4a,
-	0x8a, 0xda, 0xfa, 0x33, 0x0f, 0xb5, 0xac, 0xc7, 0x3d, 0xa8, 0xe3, 0x18, 0x48, 0x97, 0x4c, 0x3c,
-	0x54, 0xf7, 0x96, 0x87, 0x4a, 0x73, 0x92, 0x0d, 0x53, 0xc3, 0x8b, 0x22, 0xfa, 0x0e, 0x1a, 0xd2,
-	0xa5, 0x81, 0x83, 0x05, 0x0f, 0x9d, 0xd4, 0x9a, 0xa7, 0xed, 0x27, 0x11, 0x3d, 0x58, 0x6a, 0x8e,
-	0x75, 0x95, 0x3f, 0xcb, 0xd9, 0x3b, 0x72, 0x6d, 0x4b, 0x9c, 0x03, 0x12, 0x04, 0x2f, 0x5b, 0xfe,
-	0x47, 0x61, 0x9f, 0xe5, 0x6c, 0x53, 0x2c, 0x57, 0xe9, 0x25, 0xdc, 0x9d, 0xae, 0x31, 0x18, 0xcf,
-	0xec, 0x07, 0x59, 0x83, 0xfd, 0x35, 0x16, 0xb7, 0xa7, 0xcb, 0x26, 0x7b, 0xe6, 0x3c, 0x8d, 0xb1,
-	0xb5, 0xd6, 0x2f, 0x05, 0xa8, 0x26, 0x43, 0xf0, 0x52, 0x75, 0xef, 0xdb, 0x6e, 0xae, 0x7d, 0x00,
-	0x9f, 0x8c, 0xf5, 0x2c, 0xce, 0x46, 0xb7, 0x92, 0x20, 0x16, 0x56, 0x8b, 0x2d, 0x1c, 0x50, 0xe9,
-	0x8c, 0xdd, 0x50, 0x67, 0xa4, 0x62, 0x97, 0x94, 0xfc, 0xdc, 0x0d, 0xd1, 0x43, 0xa8, 0xbb, 0x24,
-	0x72, 0x08, 0xf3, 0xc4, 0x4d, 0xa8, 0x6f, 0x55, 0x11, 0x96, 0xed, 0x9a, 0x4b, 0xa2, 0xd3, 0x19,
-	0xf8, 0x1f, 0x3c, 0x32, 0x99, 0xb7, 0xad, 0xf8, 0xb7, 0x6f, 0x5b, 0x29, 0xf3, 0xb6, 0xad, 0x36,
-	0x5e, 0xf9, 0x5f, 0x37, 0x5e, 0x6f, 0x39, 0xeb, 0x8d, 0x8a, 0xae, 0xe1, 0x7a, 0x1b, 0xc9, 0x20,
-	0xa4, 0x36, 0x62, 0xb1, 0xf5, 0x87, 0x01, 0xb5, 0xc5, 0x3a, 0xfd, 0xff, 0x1b, 0xb4, 0x3b, 0xdf,
-	0xa0, 0x7a, 0xaf, 0x45, 0x8d, 0xa2, 0xde, 0xa0, 0x7b, 0x6b, 0x37, 0xa8, 0x76, 0x6a, 0xb6, 0x3d,
-	0x13, 0x17, 0xd7, 0x3c, 0x11, 0xa5, 0x35, 0x4f, 0x44, 0xfb, 0x4b, 0xa8, 0xcc, 0x9a, 0x0a, 0x55,
-	0xa1, 0xfc, 0xf5, 0xc5, 0xe5, 0x95, 0x7d, 0xda, 0x7d, 0x6e, 0xe6, 0x50, 0x1d, 0xe0, 0xe4, 0x45,
-	0xff, 0x3c, 0x91, 0x0d, 0xb4, 0x0d, 0xb5, 0x9e, 0x75, 0x62, 0xd9, 0xa7, 0xcf, 0xae, 0xac, 0x17,
-	0xe7, 0xdd, 0xaf, 0xcc, 0x7c, 0xfb, 0x09, 0x98, 0xcb, 0x75, 0x47, 0x25, 0x28, 0xf4, 0x6d, 0xdb,
-	0xcc, 0x21, 0x04, 0xf5, 0x4b, 0x29, 0xa8, 0x27, 0x2f, 0x92, 0x4a, 0x9b, 0x06, 0x02, 0x28, 0x9e,
-	0xdd, 0x0c, 0x04, 0xc5, 0x66, 0xbe, 0xcd, 0xa0, 0xba, 0xf8, 0x37, 0x84, 0x76, 0x60, 0x7b, 0x51,
-	0x76, 0xce, 0x39, 0x23, 0x66, 0x0e, 0xdd, 0x85, 0xad, 0x2c, 0xdc, 0x35, 0x0d, 0x74, 0x0f, 0x76,
-	0x33, 0x60, 0x8f, 0x44, 0xf2, 0x74, 0x38, 0xe4, 0x42, 0x9a, 0xf9, 0x15, 0x43, 0xdd, 0x89, 0xe4,
-	0x66, 0xa1, 0xfd, 0x74, 0xb6, 0xd9, 0x12, 0x4f, 0xab, 0x50, 0x4e, 0xf7, 0x8c, 0x99, 0x43, 0x35,
-	0xa8, 0xf4, 0x67, 0xa2, 0xa1, 0xc2, 0xb0, 0x09, 0x36, 0xf3, 0xa8, 0x0c, 0x77, 0xd4, 0x88, 0x9b,
-	0x85, 0xf6, 0x4f, 0x06, 0xdc, 0xbf, 0xed, 0xcf, 0x04, 0x3d, 0x84, 0x0f, 0x6f, 0xfb, 0x9e, 0x46,
-	0x74, 0x00, 0x1f, 0xdd, 0x4a, 0xeb, 0x46, 0xd1, 0x44, 0x10, 0x6c, 0x1a, 0xe8, 0x13, 0x78, 0x74,
-	0x2b, 0x73, 0x31, 0xec, 0xde, 0x37, 0xd0, 0xe4, 0xc2, 0x3f, 0xe4, 0x21, 0x61, 0x1e, 0x17, 0xf8,
-	0x30, 0xfe, 0x51, 0xce, 0xb4, 0xcc, 0xb7, 0xc7, 0x3e, 0x95, 0xa3, 0xc9, 0xe0, 0xd0, 0xe3, 0xe3,
-	0x4e, 0x4a, 0xec, 0xc4, 0xc4, 0x4f, 0x93, 0x3f, 0xea, 0x57, 0xc7, 0x1d, 0x9f, 0x67, 0xfe, 0xab,
-	0x07, 0x45, 0xfd, 0xe9, 0xf3, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x1e, 0x89, 0x67, 0x3b, 0x7c,
-	0x0b, 0x00, 0x00,
+	// 2138 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0x4d, 0x6f, 0x1b, 0xb9,
+	0xf9, 0xb7, 0x24, 0x5b, 0x2f, 0x8f, 0x24, 0x7b, 0x4c, 0xc7, 0x1b, 0xc5, 0x71, 0x36, 0x8e, 0xf6,
+	0xbf, 0xff, 0x75, 0x5d, 0x34, 0xee, 0x3a, 0x4e, 0xf6, 0x90, 0x6d, 0x17, 0x52, 0x6c, 0x24, 0x6a,
+	0x37, 0x8e, 0x3d, 0x76, 0xeb, 0xa2, 0x87, 0x0e, 0x46, 0x33, 0x94, 0xcc, 0xee, 0x88, 0x1c, 0x93,
+	0x9c, 0x38, 0xde, 0x53, 0x51, 0xa0, 0x9f, 0xa2, 0xed, 0xa5, 0x40, 0xaf, 0xbd, 0xec, 0xa5, 0x68,
+	0x2f, 0x05, 0xfa, 0x2d, 0xfa, 0x09, 0x0a, 0xf4, 0x4b, 0x14, 0xe4, 0xcc, 0x68, 0x5e, 0xa4, 0x24,
+	0x76, 0xaa, 0x2c, 0xd0, 0xde, 0xc8, 0x87, 0x3f, 0x3e, 0x7c, 0xf8, 0xbc, 0xfc, 0x48, 0xce, 0xc0,
+	0xc6, 0x4b, 0xe6, 0xc9, 0x33, 0xdb, 0xf2, 0x39, 0x93, 0x4c, 0x6c, 0x4b, 0xec, 0x9c, 0xa9, 0xf6,
+	0x80, 0x78, 0xf8, 0xbe, 0x96, 0xa1, 0x46, 0x5a, 0xb6, 0xb6, 0x3e, 0x64, 0x6c, 0xe8, 0xe1, 0x6d,
+	0xdb, 0x27, 0xdb, 0x36, 0xa5, 0x4c, 0xda, 0x92, 0x30, 0x2a, 0x42, 0x6c, 0xfb, 0x57, 0x45, 0x58,
+	0x3a, 0x76, 0xce, 0xb0, 0x1b, 0x78, 0x98, 0x3f, 0x61, 0x74, 0x40, 0x86, 0xe8, 0x21, 0xd4, 0x5c,
+	0xc2, 0xb1, 0xa3, 0x70, 0xad, 0xc2, 0x46, 0x61, 0x73, 0x71, 0xe7, 0xe6, 0xfd, 0xcc, 0x3a, 0x7b,
+	0xf1, 0xb0, 0x99, 0x20, 0xd1, 0x17, 0xd0, 0xb4, 0x5d, 0x97, 0xa8, 0xb6, 0xed, 0x59, 0xfd, 0x8b,
+	0x56, 0x51, 0x4f, 0x5d, 0xcb, 0x4e, 0xed, 0x8c, 0x21, 0xdd, 0x53, 0xb3, 0x91, 0x4c, 0xe8, 0x5e,
+	0xa0, 0x35, 0xa8, 0xfa, 0x9c, 0x30, 0x4e, 0xe4, 0x65, 0xab, 0xb4, 0x51, 0xd8, 0xac, 0x98, 0xe3,
+	0x3e, 0xfa, 0x00, 0xca, 0x17, 0x98, 0x0c, 0xcf, 0x64, 0x6b, 0x5e, 0x8f, 0x44, 0x3d, 0xd4, 0x81,
+	0x86, 0x50, 0xe6, 0x5b, 0x3e, 0xf3, 0x88, 0x73, 0xd9, 0x5a, 0xd0, 0x6b, 0x7e, 0x98, 0x5d, 0x33,
+	0xda, 0x20, 0xa1, 0xc3, 0x43, 0x8d, 0x32, 0xeb, 0x7a, 0x4e, 0xd8, 0x69, 0xff, 0xb9, 0x00, 0xe8,
+	0x84, 0xdb, 0x83, 0x01, 0x71, 0x8e, 0xcf, 0x6c, 0x9f, 0xd0, 0x61, 0x8f, 0x0e, 0x18, 0x32, 0xa0,
+	0xe4, 0x10, 0xae, 0xf7, 0x5f, 0x31, 0x55, 0x53, 0x4b, 0xfa, 0x42, 0x6f, 0x4b, 0x49, 0xfa, 0x42,
+	0x49, 0x7c, 0xc2, 0x23, 0x63, 0x55, 0x53, 0x4b, 0xfa, 0x22, 0x32, 0x52, 0x35, 0x95, 0x64, 0x48,
+	0xb8, 0x36, 0xac, 0x62, 0xaa, 0x26, 0x7a, 0x06, 0x60, 0xbb, 0xae, 0xd5, 0xbf, 0xb0, 0x08, 0x75,
+	0x5b, 0x65, 0x6d, 0xf1, 0x56, 0xd6, 0xe2, 0x1e, 0x1d, 0x60, 0xce, 0xb1, 0x1b, 0x7b, 0xab, 0x7b,
+	0xda, 0xa3, 0x2e, 0x71, 0x74, 0xe8, 0xcc, 0xaa, 0xed, 0xba, 0xdd, 0x8b, 0x1e, 0x75, 0xdb, 0xbf,
+	0x2f, 0x82, 0x11, 0x9b, 0x1e, 0x07, 0xf1, 0x5d, 0xc3, 0x77, 0x0b, 0xaa, 0xb6, 0xe7, 0x31, 0xc7,
+	0x22, 0x6e, 0xb4, 0xc5, 0x8a, 0xee, 0xf7, 0x5c, 0xf4, 0x18, 0x6a, 0x22, 0x56, 0xaf, 0x37, 0x5b,
+	0xdf, 0xb9, 0x33, 0xd5, 0xc3, 0x71, 0x0a, 0x99, 0x09, 0x1e, 0x99, 0x70, 0x43, 0x86, 0x26, 0x5a,
+	0x22, 0x74, 0xaf, 0x45, 0xe8, 0x80, 0x69, 0x17, 0xd5, 0x77, 0x36, 0xb2, 0x7a, 0x26, 0xe3, 0x60,
+	0x22, 0x39, 0x19, 0x9b, 0xff, 0x87, 0xa5, 0xf4, 0x34, 0x65, 0x72, 0xe8, 0xdf, 0xa6, 0x12, 0x1f,
+	0x86, 0xd2, 0x9e, 0xdb, 0xfe, 0x4b, 0x01, 0x96, 0xf3, 0xfe, 0x11, 0xe8, 0x26, 0x54, 0x08, 0x95,
+	0x03, 0x35, 0x2b, 0x8c, 0x6e, 0x59, 0x75, 0x7b, 0x2e, 0x5a, 0x85, 0x32, 0xa3, 0x41, 0xe2, 0x80,
+	0x05, 0x46, 0x83, 0x50, 0x1c, 0x50, 0xa2, 0xc4, 0x61, 0x58, 0x17, 0x02, 0x4a, 0x7a, 0xae, 0x52,
+	0xe3, 0x33, 0x2e, 0x2d, 0xca, 0xa2, 0xc5, 0xcb, 0xaa, 0x7b, 0xc0, 0xd0, 0x3e, 0x2c, 0x8e, 0x77,
+	0xac, 0x56, 0x15, 0xad, 0xd2, 0x46, 0x69, 0xb3, 0x9e, 0xcf, 0xca, 0xbc, 0x61, 0x66, 0x53, 0xa6,
+	0x24, 0xa2, 0xfd, 0x08, 0x56, 0x4f, 0x6c, 0xe2, 0xed, 0x71, 0xe6, 0xef, 0x11, 0xe1, 0xd8, 0xdc,
+	0x8d, 0xea, 0xf3, 0x0e, 0xc0, 0x79, 0x80, 0x03, 0x6c, 0x09, 0xf2, 0x35, 0x8e, 0xb6, 0x50, 0xd3,
+	0x92, 0x63, 0xf2, 0x35, 0x6e, 0xff, 0xa6, 0x00, 0x86, 0x89, 0xdd, 0xec, 0x9c, 0x8f, 0xa0, 0x39,
+	0x22, 0xd4, 0x92, 0x67, 0x1c, 0x8b, 0x33, 0xe6, 0xc5, 0x3b, 0x6f, 0x8c, 0x08, 0x3d, 0x89, 0x65,
+	0x1a, 0x64, 0xbf, 0x4a, 0x81, 0x8a, 0x11, 0xc8, 0x7e, 0x95, 0x80, 0x3e, 0x81, 0x25, 0x05, 0xf2,
+	0x39, 0xeb, 0xdb, 0x7d, 0xe2, 0x25, 0xc5, 0xba, 0x38, 0xb2, 0x5f, 0x1d, 0x26, 0xd2, 0xf6, 0x37,
+	0x05, 0x58, 0x3e, 0x9d, 0x30, 0x64, 0x17, 0x16, 0x86, 0x1c, 0xe3, 0x30, 0x33, 0x27, 0x7c, 0x92,
+	0x87, 0x9b, 0x21, 0x18, 0x3d, 0x82, 0xf2, 0x25, 0xf6, 0x3c, 0x16, 0x92, 0xca, 0xdb, 0xa7, 0x45,
+	0x68, 0xf4, 0x7d, 0x28, 0x71, 0xec, 0x46, 0x39, 0xfb, 0xb6, 0x49, 0x0a, 0xda, 0xfe, 0x67, 0x11,
+	0x9a, 0x59, 0x8b, 0xbb, 0xb0, 0xe8, 0x86, 0x82, 0x98, 0x64, 0xc2, 0xa2, 0xba, 0x9d, 0x2f, 0x2a,
+	0x8d, 0x89, 0x18, 0xa6, 0xe9, 0xa6, 0xbb, 0xe8, 0x17, 0xd0, 0x92, 0x36, 0xf1, 0x2c, 0x97, 0x33,
+	0xdf, 0x8a, 0xb5, 0x39, 0x5a, 0x7f, 0xb4, 0xa3, 0x8f, 0x72, 0xc9, 0x31, 0x2d, 0xf2, 0xcf, 0xe6,
+	0xcc, 0x55, 0x39, 0x35, 0x25, 0x0e, 0x00, 0x71, 0xec, 0xe6, 0x35, 0x5f, 0x69, 0xdb, 0xcf, 0xe6,
+	0x4c, 0x83, 0xe7, 0xa3, 0x74, 0x04, 0x2b, 0x17, 0x53, 0x14, 0x86, 0x35, 0x7b, 0x37, 0xab, 0xf0,
+	0x74, 0x8a, 0xc6, 0xe5, 0x8b, 0xbc, 0xca, 0xae, 0x91, 0xb8, 0x31, 0xd4, 0xd6, 0xfe, 0x63, 0x09,
+	0x1a, 0x51, 0x11, 0x1c, 0xa9, 0xec, 0x7d, 0x57, 0xe6, 0xba, 0x03, 0x30, 0xc4, 0x23, 0x5d, 0x8b,
+	0xe3, 0xd2, 0xad, 0x45, 0x92, 0x9e, 0xab, 0x88, 0xcd, 0xef, 0x13, 0x69, 0x8d, 0x6c, 0x5f, 0x7b,
+	0xa4, 0x66, 0x56, 0x54, 0xff, 0xb9, 0xed, 0xa3, 0x8f, 0x61, 0xd1, 0xc6, 0xc2, 0xc2, 0xd4, 0xe1,
+	0x97, 0xbe, 0x5e, 0x55, 0xed, 0xb0, 0x6a, 0x36, 0x6d, 0x2c, 0xf6, 0xc7, 0xc2, 0x19, 0x1c, 0x32,
+	0x99, 0xb3, 0xad, 0xfc, 0xda, 0xb3, 0xad, 0x92, 0x39, 0xdb, 0x26, 0x13, 0xaf, 0x7a, 0xed, 0xc4,
+	0xeb, 0xe6, 0xbd, 0xde, 0xaa, 0xe9, 0x18, 0x4e, 0xd7, 0x11, 0x15, 0x42, 0xac, 0x23, 0xec, 0xb6,
+	0xff, 0x51, 0x80, 0x66, 0x3a, 0x4e, 0xef, 0x9f, 0x41, 0x3b, 0x09, 0x83, 0x6a, 0x5e, 0x13, 0xad,
+	0xb2, 0x66, 0xd0, 0xb5, 0xa9, 0x0c, 0xaa, 0x8d, 0x1a, 0xb3, 0x67, 0x64, 0xe2, 0x94, 0x23, 0xa2,
+	0x32, 0xed, 0x88, 0x18, 0xc0, 0x52, 0x8f, 0x0a, 0x69, 0x53, 0x07, 0x3f, 0x61, 0x54, 0x72, 0xe6,
+	0xa9, 0x13, 0x9b, 0xd1, 0x40, 0xef, 0xac, 0x66, 0xaa, 0xa6, 0x92, 0x04, 0x94, 0xe8, 0x3d, 0xd5,
+	0x4c, 0xd5, 0x44, 0xdb, 0x70, 0x43, 0xb1, 0xe0, 0x10, 0x8f, 0x2c, 0xdf, 0xbe, 0xf4, 0x98, 0xed,
+	0x86, 0x6c, 0x1c, 0x26, 0xd8, 0xf2, 0xc8, 0x7e, 0xf5, 0x14, 0x8f, 0x0e, 0xc3, 0x11, 0xcd, 0xca,
+	0xbf, 0x2e, 0x42, 0xfd, 0x68, 0xcc, 0xa2, 0x02, 0xdd, 0x83, 0xc6, 0x79, 0xc2, 0xb4, 0x9f, 0xea,
+	0xd5, 0x9a, 0x66, 0xfd, 0x7c, 0x0c, 0xf9, 0x34, 0x07, 0xd9, 0xd1, 0xcb, 0x67, 0x20, 0x3b, 0x39,
+	0xc8, 0x03, 0xbd, 0x7c, 0x06, 0xf2, 0x20, 0x07, 0xd9, 0xd5, 0x11, 0xc8, 0x40, 0x76, 0x73, 0x90,
+	0x87, 0x3a, 0x18, 0x19, 0xc8, 0xc3, 0x1c, 0xe4, 0x91, 0xce, 0xe1, 0x0c, 0xe4, 0x51, 0x0e, 0xf2,
+	0x99, 0x76, 0x77, 0x06, 0xf2, 0x59, 0xfb, 0x9b, 0x05, 0x58, 0x56, 0x7e, 0x61, 0x5c, 0x76, 0xa4,
+	0xe4, 0xa4, 0x1f, 0x48, 0x2c, 0x72, 0xf5, 0x5b, 0xc8, 0xd7, 0xef, 0x3a, 0x80, 0x72, 0xf5, 0x79,
+	0xe8, 0xe0, 0x30, 0x06, 0xd5, 0x91, 0xfd, 0xea, 0x48, 0xf9, 0xf5, 0xfa, 0xd5, 0x5d, 0xcb, 0x57,
+	0xf7, 0x8f, 0x61, 0x59, 0x8c, 0x6b, 0xf7, 0x7a, 0x25, 0x6e, 0x88, 0x9c, 0x44, 0xed, 0x25, 0xae,
+	0x6b, 0xeb, 0x3c, 0xaa, 0xf4, 0x5a, 0x2c, 0x39, 0x7a, 0xaf, 0xa5, 0xbe, 0xff, 0x9a, 0x52, 0x7f,
+	0xdb, 0xb1, 0x97, 0xad, 0x76, 0xf4, 0x14, 0x96, 0xb3, 0x6a, 0xac, 0x97, 0x3b, 0xad, 0xc5, 0xb7,
+	0x93, 0xc6, 0x52, 0x46, 0xcd, 0x4f, 0x75, 0x6e, 0x12, 0x61, 0x8d, 0x02, 0x4f, 0x12, 0xc7, 0x16,
+	0xb2, 0x05, 0xda, 0xf9, 0x75, 0x22, 0x9e, 0xc7, 0x22, 0xb4, 0x09, 0xc6, 0x78, 0x5c, 0xd7, 0x12,
+	0x71, 0x5b, 0xf5, 0xe8, 0x32, 0x11, 0xcb, 0x9f, 0xe2, 0x51, 0xcf, 0x45, 0x3f, 0x80, 0xdb, 0xee,
+	0x25, 0xb5, 0x47, 0xc4, 0xb1, 0x6c, 0xc7, 0xc1, 0x42, 0x28, 0xe3, 0x54, 0xb5, 0x5a, 0x1e, 0x11,
+	0xb2, 0xd5, 0xd0, 0xba, 0x5b, 0x11, 0xa4, 0xa3, 0x11, 0x51, 0x39, 0x7f, 0x49, 0x84, 0x44, 0x8f,
+	0x61, 0x4d, 0xa8, 0x87, 0xcf, 0xf4, 0xd9, 0x4d, 0x3d, 0xfb, 0x66, 0x88, 0x98, 0x98, 0xdc, 0xfe,
+	0x43, 0x11, 0x56, 0xc6, 0xb7, 0xb4, 0x54, 0xde, 0xce, 0xe8, 0xa2, 0xdd, 0x4c, 0x2e, 0xda, 0x13,
+	0x4f, 0xa8, 0xd2, 0x7f, 0xf0, 0x84, 0x9a, 0x7f, 0xed, 0x31, 0xb3, 0x90, 0xc9, 0xbd, 0x3d, 0x58,
+	0x3c, 0xb7, 0x32, 0xe7, 0x5b, 0xf9, 0x4a, 0xc9, 0xdf, 0x38, 0x3f, 0x4e, 0xbd, 0xa2, 0xfe, 0x56,
+	0x86, 0x95, 0xfd, 0xc3, 0x17, 0x07, 0x9a, 0x7e, 0x53, 0x4e, 0xca, 0x56, 0x6f, 0xe1, 0x0d, 0xd5,
+	0x5b, 0xcc, 0x56, 0x6f, 0x96, 0x15, 0x42, 0x62, 0x4b, 0xb1, 0xc2, 0x15, 0x8b, 0xfb, 0x1e, 0x34,
+	0xe2, 0x93, 0x44, 0x5e, 0xfa, 0x58, 0x6f, 0xbd, 0x66, 0xd6, 0x23, 0xd9, 0xc9, 0xa5, 0x8f, 0xd1,
+	0x2e, 0x7c, 0x10, 0x50, 0xa1, 0x76, 0x41, 0x24, 0x76, 0xad, 0x21, 0xb7, 0xa9, 0x0c, 0xad, 0x0d,
+	0x49, 0xee, 0x46, 0x6a, 0xf4, 0xa9, 0x1a, 0xd4, 0x96, 0x7f, 0x07, 0x0c, 0xca, 0x46, 0x44, 0xc5,
+	0x89, 0x50, 0x89, 0xf9, 0x4b, 0xdb, 0x8b, 0x18, 0x6f, 0x29, 0x92, 0xf7, 0x22, 0x31, 0xda, 0x81,
+	0x55, 0xc9, 0x3c, 0xcc, 0x6d, 0x19, 0xba, 0xd8, 0xb3, 0x7e, 0x49, 0xa4, 0xc4, 0x5c, 0xd7, 0x78,
+	0xd3, 0x5c, 0x19, 0x0f, 0x1e, 0x32, 0xcf, 0xfb, 0x91, 0x1e, 0x42, 0x3f, 0x84, 0xdb, 0x1c, 0x9f,
+	0x07, 0x58, 0x48, 0x4b, 0x72, 0x9b, 0x8a, 0x11, 0x11, 0x82, 0x30, 0x1a, 0x47, 0xa8, 0xa6, 0x67,
+	0xde, 0x8a, 0x20, 0x27, 0x29, 0x44, 0x44, 0x06, 0xeb, 0x00, 0x34, 0x18, 0x29, 0xb7, 0x63, 0x29,
+	0x74, 0xe9, 0x35, 0xcd, 0x2a, 0x0d, 0x46, 0x47, 0xc7, 0x58, 0x0a, 0xf4, 0x79, 0x86, 0xaa, 0x85,
+	0xae, 0xb9, 0xfa, 0xce, 0xad, 0x6c, 0xc0, 0x53, 0xa7, 0x55, 0x9a, 0xc5, 0xc5, 0x74, 0xc2, 0x6c,
+	0xcc, 0x84, 0x30, 0x9b, 0x61, 0x98, 0xa7, 0x11, 0xe6, 0xa2, 0x1e, 0x7a, 0x3d, 0x61, 0x2e, 0xcd,
+	0x80, 0x30, 0x8d, 0x99, 0x11, 0xe6, 0xf2, 0xf5, 0x09, 0xb3, 0xfd, 0xa7, 0x79, 0xa8, 0x9f, 0x24,
+	0x97, 0x13, 0x84, 0x60, 0x9e, 0xda, 0xa3, 0xb8, 0x68, 0x74, 0x1b, 0xb5, 0xa0, 0xf2, 0x12, 0x73,
+	0x15, 0xe8, 0x98, 0x3b, 0xa2, 0xae, 0xca, 0xf4, 0xf8, 0xae, 0xa3, 0x33, 0x3d, 0x3c, 0x0c, 0xeb,
+	0x91, 0x4c, 0x67, 0x7a, 0x1b, 0x9a, 0x2a, 0x29, 0xf4, 0xa5, 0x85, 0x71, 0x29, 0xe2, 0xbb, 0x00,
+	0x0d, 0x46, 0xd1, 0xa9, 0x2c, 0xd0, 0x33, 0x30, 0x48, 0x74, 0x1f, 0x8a, 0x49, 0x52, 0x17, 0xcd,
+	0xc4, 0x93, 0x3f, 0x77, 0x6b, 0x32, 0x97, 0x48, 0xee, 0x1a, 0xb5, 0x07, 0x8d, 0x40, 0x58, 0xc9,
+	0x87, 0x83, 0xb2, 0xd6, 0x72, 0xef, 0x35, 0x1f, 0x0e, 0x12, 0xca, 0x30, 0xeb, 0x81, 0x48, 0xbe,
+	0x66, 0xec, 0x41, 0xc3, 0x4d, 0x6b, 0xa9, 0x5c, 0x59, 0x8b, 0x9b, 0xd2, 0x32, 0x84, 0x8d, 0xc0,
+	0x17, 0x92, 0x63, 0x3b, 0xd9, 0xbe, 0x65, 0xc7, 0xe0, 0xf0, 0x14, 0xa8, 0xea, 0x2b, 0x66, 0xee,
+	0x71, 0x33, 0x71, 0x5b, 0x31, 0xd7, 0x63, 0x45, 0xf9, 0x21, 0x7d, 0xd0, 0x7c, 0x05, 0x6d, 0x97,
+	0x5d, 0xd0, 0xb7, 0x2c, 0x55, 0xbb, 0xda, 0x52, 0x1f, 0x26, 0xaa, 0xa6, 0x2d, 0xd6, 0xfe, 0x7b,
+	0x09, 0x96, 0xf6, 0x7d, 0x46, 0xff, 0x87, 0x92, 0x46, 0x19, 0x64, 0x3b, 0x5f, 0xd9, 0xc3, 0xc8,
+	0xa0, 0x72, 0x64, 0x50, 0x28, 0xd3, 0x06, 0xb9, 0x70, 0x67, 0x1c, 0xcb, 0xf0, 0x3b, 0x48, 0xce,
+	0xbb, 0x15, 0xed, 0xdd, 0x5c, 0x8a, 0x4c, 0x39, 0x9b, 0xcc, 0xb5, 0x58, 0x4f, 0x76, 0x40, 0x07,
+	0xf2, 0x0c, 0xee, 0xa6, 0x02, 0x39, 0x75, 0x9d, 0xea, 0x55, 0xd7, 0x59, 0x4f, 0x34, 0x4d, 0xae,
+	0xd4, 0xfe, 0xd7, 0x3c, 0xac, 0xa4, 0x22, 0x18, 0xbb, 0xe8, 0x9a, 0x91, 0x7c, 0x00, 0xab, 0x22,
+	0xe8, 0x0b, 0x87, 0x93, 0x3e, 0xe6, 0x16, 0x71, 0x31, 0x95, 0x64, 0x40, 0xa2, 0xef, 0x75, 0x35,
+	0xf3, 0x46, 0x32, 0xd8, 0x1b, 0x8f, 0x4d, 0x84, 0x7f, 0xfe, 0x0a, 0xe1, 0x5f, 0xb8, 0x5a, 0xf8,
+	0xcb, 0x33, 0xe1, 0x8c, 0xca, 0x4c, 0x38, 0xa3, 0xfa, 0xde, 0x38, 0xa3, 0xf6, 0xed, 0x71, 0x06,
+	0xcc, 0x86, 0x33, 0x7e, 0x3b, 0x0f, 0x37, 0x73, 0x9c, 0xf1, 0x5f, 0x98, 0x71, 0xe9, 0x3b, 0x74,
+	0x39, 0x7b, 0x87, 0x9e, 0x96, 0x8c, 0x95, 0x99, 0x70, 0x51, 0xf5, 0x1d, 0xb8, 0xa8, 0xf6, 0x2d,
+	0x71, 0x11, 0xcc, 0x86, 0x8b, 0xfe, 0xaa, 0xbf, 0x1d, 0x0b, 0x16, 0x70, 0x27, 0x49, 0x8b, 0x15,
+	0x58, 0x90, 0x7e, 0xfc, 0x34, 0x6f, 0x9a, 0xf3, 0xd2, 0xef, 0xb9, 0x13, 0x81, 0x2c, 0x4e, 0x06,
+	0xf2, 0x9d, 0x12, 0x24, 0x1d, 0xd9, 0xf9, 0x6c, 0x64, 0xef, 0x42, 0x3d, 0x79, 0x11, 0xa8, 0xb4,
+	0x28, 0x6d, 0x36, 0x4d, 0x18, 0x3f, 0x09, 0xc4, 0xd6, 0xe7, 0x50, 0x1b, 0xbf, 0xb8, 0x50, 0x03,
+	0xaa, 0x3f, 0x39, 0x3c, 0x3e, 0x31, 0xf7, 0x3b, 0xcf, 0x8d, 0x39, 0xb4, 0x08, 0xb0, 0xf7, 0xe2,
+	0xf4, 0x20, 0xea, 0x17, 0xd0, 0x32, 0x34, 0xbb, 0xbd, 0xbd, 0x9e, 0xb9, 0xff, 0xe4, 0xa4, 0xf7,
+	0xe2, 0xa0, 0xf3, 0xa5, 0x51, 0xdc, 0x7a, 0x0c, 0x46, 0xfe, 0xbe, 0x8a, 0x2a, 0x50, 0x3a, 0x35,
+	0x4d, 0x63, 0x0e, 0x21, 0x58, 0x3c, 0x96, 0x9c, 0x38, 0xf2, 0x30, 0xba, 0x9a, 0x1a, 0x05, 0x04,
+	0x50, 0x7e, 0x76, 0xd9, 0xe7, 0xc4, 0x35, 0x8a, 0x5b, 0x14, 0x1a, 0xe9, 0x67, 0x19, 0x5a, 0x85,
+	0xe5, 0x74, 0xdf, 0x3a, 0x60, 0x14, 0x1b, 0x73, 0x68, 0x05, 0x96, 0xb2, 0xe2, 0x8e, 0x51, 0x40,
+	0xb7, 0xe1, 0x66, 0x46, 0xd8, 0xc5, 0x42, 0xee, 0x0f, 0x06, 0x8c, 0x4b, 0xa3, 0x38, 0xa1, 0xa8,
+	0x13, 0x48, 0x66, 0x94, 0xb6, 0xbe, 0x18, 0x7f, 0xa5, 0x8e, 0x2c, 0x6d, 0x40, 0x35, 0xfe, 0x66,
+	0x6c, 0xcc, 0xa1, 0x26, 0xd4, 0x4e, 0xc7, 0xdd, 0x82, 0xda, 0x86, 0x89, 0x5d, 0xa3, 0x88, 0xaa,
+	0x30, 0x7f, 0xaa, 0x5a, 0xa5, 0xad, 0xdf, 0x15, 0x60, 0xfd, 0x4d, 0x7f, 0x99, 0xd0, 0xc7, 0x70,
+	0xef, 0x4d, 0xe3, 0xf1, 0x8e, 0x36, 0xe1, 0xff, 0xde, 0x08, 0xeb, 0x08, 0x11, 0x70, 0xec, 0x1a,
+	0x05, 0xf4, 0x5d, 0xf8, 0xe4, 0x8d, 0xc8, 0xf4, 0xb6, 0xbb, 0x3f, 0x83, 0x0d, 0xc6, 0x87, 0xf7,
+	0x99, 0x8f, 0xa9, 0xc3, 0xb8, 0x7b, 0x3f, 0xfc, 0xe9, 0x99, 0x49, 0xef, 0x9f, 0xef, 0x0e, 0x89,
+	0x3c, 0x0b, 0xfa, 0xf7, 0x1d, 0x36, 0xda, 0x8e, 0x81, 0xdb, 0x21, 0xf0, 0x7b, 0xd1, 0xdf, 0xd1,
+	0x97, 0xbb, 0xdb, 0x43, 0x96, 0xf9, 0x47, 0xda, 0x2f, 0xeb, 0xa1, 0x07, 0xff, 0x0e, 0x00, 0x00,
+	0xff, 0xff, 0xc7, 0x6b, 0x83, 0x2e, 0x48, 0x1d, 0x00, 0x00,
 }
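
For readers of the generated bindings above, the sketch below shows how the new TechProfileInstance message might be built and exchanged using the nil-safe getters defined in this file. The voltha-protos import path, the tp_pb alias, and all field values are illustrative assumptions, not something prescribed by this change.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	// Assumed import path for the generated tech_profile package.
	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
)

func main() {
	// Populate only fields defined in the generated struct above; values are placeholders.
	tpInst := &tp_pb.TechProfileInstance{
		Name:                 "XGS-PON",
		Version:              1,
		SubscriberIdentifier: "onu-1-uni-0",
		ProfileType:          "XGS-PON",
		NumGemPorts:          4,
	}

	// Generated getters are nil-safe: they return the zero value on a nil receiver.
	var missing *tp_pb.TechProfileInstance
	fmt.Println(missing.GetNumGemPorts()) // prints 0, no panic

	// Marshal/unmarshal round trip, e.g. for carrying the instance in a message body.
	buf, err := proto.Marshal(tpInst)
	if err != nil {
		panic(err)
	}
	decoded := &tp_pb.TechProfileInstance{}
	if err := proto.Unmarshal(buf, decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded.GetName(), len(decoded.GetUpstreamGemPortAttributeList()))
}
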
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go
deleted file mode 100644
index dcdbf51..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/doc.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package concurrency implements concurrency operations on top of
-// etcd such as distributed locks, barriers, and elections.
-package concurrency
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go
deleted file mode 100644
index 2521db6..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/election.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
-	"context"
-	"errors"
-	"fmt"
-
-	v3 "go.etcd.io/etcd/clientv3"
-	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
-	"go.etcd.io/etcd/mvcc/mvccpb"
-)
-
-var (
-	ErrElectionNotLeader = errors.New("election: not leader")
-	ErrElectionNoLeader  = errors.New("election: no leader")
-)
-
-type Election struct {
-	session *Session
-
-	keyPrefix string
-
-	leaderKey     string
-	leaderRev     int64
-	leaderSession *Session
-	hdr           *pb.ResponseHeader
-}
-
-// NewElection returns a new election on a given key prefix.
-func NewElection(s *Session, pfx string) *Election {
-	return &Election{session: s, keyPrefix: pfx + "/"}
-}
-
-// ResumeElection initializes an election with a known leader.
-func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
-	return &Election{
-		keyPrefix:     pfx,
-		session:       s,
-		leaderKey:     leaderKey,
-		leaderRev:     leaderRev,
-		leaderSession: s,
-	}
-}
-
-// Campaign puts a value as eligible for the election on the prefix
-// key.
-// Multiple sessions can participate in the election for the
-// same prefix, but only one can be the leader at a time.
-//
-// If the context is 'context.TODO()/context.Background()', the Campaign
-// will continue to be blocked for other keys to be deleted, unless server
-// returns a non-recoverable error (e.g. ErrCompacted).
-// Otherwise, until the context is not cancelled or timed-out, Campaign will
-// continue to be blocked until it becomes the leader.
-func (e *Election) Campaign(ctx context.Context, val string) error {
-	s := e.session
-	client := e.session.Client()
-
-	k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
-	txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
-	txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
-	txn = txn.Else(v3.OpGet(k))
-	resp, err := txn.Commit()
-	if err != nil {
-		return err
-	}
-	e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
-	if !resp.Succeeded {
-		kv := resp.Responses[0].GetResponseRange().Kvs[0]
-		e.leaderRev = kv.CreateRevision
-		if string(kv.Value) != val {
-			if err = e.Proclaim(ctx, val); err != nil {
-				e.Resign(ctx)
-				return err
-			}
-		}
-	}
-
-	_, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
-	if err != nil {
-		// clean up in case of context cancel
-		select {
-		case <-ctx.Done():
-			e.Resign(client.Ctx())
-		default:
-			e.leaderSession = nil
-		}
-		return err
-	}
-	e.hdr = resp.Header
-
-	return nil
-}
-
-// Proclaim lets the leader announce a new value without another election.
-func (e *Election) Proclaim(ctx context.Context, val string) error {
-	if e.leaderSession == nil {
-		return ErrElectionNotLeader
-	}
-	client := e.session.Client()
-	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
-	txn := client.Txn(ctx).If(cmp)
-	txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
-	tresp, terr := txn.Commit()
-	if terr != nil {
-		return terr
-	}
-	if !tresp.Succeeded {
-		e.leaderKey = ""
-		return ErrElectionNotLeader
-	}
-
-	e.hdr = tresp.Header
-	return nil
-}
-
-// Resign lets a leader start a new election.
-func (e *Election) Resign(ctx context.Context) (err error) {
-	if e.leaderSession == nil {
-		return nil
-	}
-	client := e.session.Client()
-	cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
-	resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
-	if err == nil {
-		e.hdr = resp.Header
-	}
-	e.leaderKey = ""
-	e.leaderSession = nil
-	return err
-}
-
-// Leader returns the leader value for the current election.
-func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
-	client := e.session.Client()
-	resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
-	if err != nil {
-		return nil, err
-	} else if len(resp.Kvs) == 0 {
-		// no leader currently elected
-		return nil, ErrElectionNoLeader
-	}
-	return resp, nil
-}
-
-// Observe returns a channel that reliably observes ordered leader proposals
-// as GetResponse values on every current elected leader key. It will not
-// necessarily fetch all historical leader updates, but will always post the
-// most recent leader value.
-//
-// The channel closes when the context is canceled or the underlying watcher
-// is otherwise disrupted.
-func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
-	retc := make(chan v3.GetResponse)
-	go e.observe(ctx, retc)
-	return retc
-}
-
-func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
-	client := e.session.Client()
-
-	defer close(ch)
-	for {
-		resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
-		if err != nil {
-			return
-		}
-
-		var kv *mvccpb.KeyValue
-		var hdr *pb.ResponseHeader
-
-		if len(resp.Kvs) == 0 {
-			cctx, cancel := context.WithCancel(ctx)
-			// wait for first key put on prefix
-			opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
-			wch := client.Watch(cctx, e.keyPrefix, opts...)
-			for kv == nil {
-				wr, ok := <-wch
-				if !ok || wr.Err() != nil {
-					cancel()
-					return
-				}
-				// only accept puts; a delete will make observe() spin
-				for _, ev := range wr.Events {
-					if ev.Type == mvccpb.PUT {
-						hdr, kv = &wr.Header, ev.Kv
-						// may have multiple revs; hdr.rev = the last rev
-						// set to kv's rev in case batch has multiple Puts
-						hdr.Revision = kv.ModRevision
-						break
-					}
-				}
-			}
-			cancel()
-		} else {
-			hdr, kv = resp.Header, resp.Kvs[0]
-		}
-
-		select {
-		case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
-		case <-ctx.Done():
-			return
-		}
-
-		cctx, cancel := context.WithCancel(ctx)
-		wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
-		keyDeleted := false
-		for !keyDeleted {
-			wr, ok := <-wch
-			if !ok {
-				cancel()
-				return
-			}
-			for _, ev := range wr.Events {
-				if ev.Type == mvccpb.DELETE {
-					keyDeleted = true
-					break
-				}
-				resp.Header = &wr.Header
-				resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
-				select {
-				case ch <- *resp:
-				case <-cctx.Done():
-					cancel()
-					return
-				}
-			}
-		}
-		cancel()
-	}
-}
-
-// Key returns the leader key if elected, empty string otherwise.
-func (e *Election) Key() string { return e.leaderKey }
-
-// Rev returns the leader key's creation revision, if elected.
-func (e *Election) Rev() int64 { return e.leaderRev }
-
-// Header is the response header from the last successful election proposal.
-func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go
deleted file mode 100644
index e4cf775..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/key.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
-	"context"
-	"fmt"
-
-	v3 "go.etcd.io/etcd/clientv3"
-	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
-	"go.etcd.io/etcd/mvcc/mvccpb"
-)
-
-func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
-	cctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-
-	var wr v3.WatchResponse
-	wch := client.Watch(cctx, key, v3.WithRev(rev))
-	for wr = range wch {
-		for _, ev := range wr.Events {
-			if ev.Type == mvccpb.DELETE {
-				return nil
-			}
-		}
-	}
-	if err := wr.Err(); err != nil {
-		return err
-	}
-	if err := ctx.Err(); err != nil {
-		return err
-	}
-	return fmt.Errorf("lost watcher waiting for delete")
-}
-
-// waitDeletes efficiently waits until all keys matching the prefix and no greater
-// than the create revision.
-func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
-	getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
-	for {
-		resp, err := client.Get(ctx, pfx, getOpts...)
-		if err != nil {
-			return nil, err
-		}
-		if len(resp.Kvs) == 0 {
-			return resp.Header, nil
-		}
-		lastKey := string(resp.Kvs[0].Key)
-		if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
-			return nil, err
-		}
-	}
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
deleted file mode 100644
index 306470b..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/mutex.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sync"
-
-	v3 "go.etcd.io/etcd/clientv3"
-	pb "go.etcd.io/etcd/etcdserver/etcdserverpb"
-)
-
-// ErrLocked is returned by TryLock when Mutex is already locked by another session.
-var ErrLocked = errors.New("mutex: Locked by another session")
-
-// Mutex implements the sync Locker interface with etcd
-type Mutex struct {
-	s *Session
-
-	pfx   string
-	myKey string
-	myRev int64
-	hdr   *pb.ResponseHeader
-}
-
-func NewMutex(s *Session, pfx string) *Mutex {
-	return &Mutex{s, pfx + "/", "", -1, nil}
-}
-
-// TryLock locks the mutex if not already locked by another session.
-// If lock is held by another session, return immediately after attempting necessary cleanup
-// The ctx argument is used for the sending/receiving Txn RPC.
-func (m *Mutex) TryLock(ctx context.Context) error {
-	resp, err := m.tryAcquire(ctx)
-	if err != nil {
-		return err
-	}
-	// if no key on prefix / the minimum rev is key, already hold the lock
-	ownerKey := resp.Responses[1].GetResponseRange().Kvs
-	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
-		m.hdr = resp.Header
-		return nil
-	}
-	client := m.s.Client()
-	// Cannot lock, so delete the key
-	if _, err := client.Delete(ctx, m.myKey); err != nil {
-		return err
-	}
-	m.myKey = "\x00"
-	m.myRev = -1
-	return ErrLocked
-}
-
-// Lock locks the mutex with a cancelable context. If the context is canceled
-// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
-func (m *Mutex) Lock(ctx context.Context) error {
-	resp, err := m.tryAcquire(ctx)
-	if err != nil {
-		return err
-	}
-	// if no key on prefix / the minimum rev is key, already hold the lock
-	ownerKey := resp.Responses[1].GetResponseRange().Kvs
-	if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
-		m.hdr = resp.Header
-		return nil
-	}
-	client := m.s.Client()
-	// wait for deletion revisions prior to myKey
-	hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
-	// release lock key if wait failed
-	if werr != nil {
-		m.Unlock(client.Ctx())
-	} else {
-		m.hdr = hdr
-	}
-	return werr
-}
-
-func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) {
-	s := m.s
-	client := m.s.Client()
-
-	m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
-	cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
-	// put self in lock waiters via myKey; oldest waiter holds lock
-	put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
-	// reuse key in case this session already holds the lock
-	get := v3.OpGet(m.myKey)
-	// fetch current holder to complete uncontended path with only one RPC
-	getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
-	resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
-	if err != nil {
-		return nil, err
-	}
-	m.myRev = resp.Header.Revision
-	if !resp.Succeeded {
-		m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
-	}
-	return resp, nil
-}
-
-func (m *Mutex) Unlock(ctx context.Context) error {
-	client := m.s.Client()
-	if _, err := client.Delete(ctx, m.myKey); err != nil {
-		return err
-	}
-	m.myKey = "\x00"
-	m.myRev = -1
-	return nil
-}
-
-func (m *Mutex) IsOwner() v3.Cmp {
-	return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
-}
-
-func (m *Mutex) Key() string { return m.myKey }
-
-// Header is the response header received from etcd on acquiring the lock.
-func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
-
-type lockerMutex struct{ *Mutex }
-
-func (lm *lockerMutex) Lock() {
-	client := lm.s.Client()
-	if err := lm.Mutex.Lock(client.Ctx()); err != nil {
-		panic(err)
-	}
-}
-func (lm *lockerMutex) Unlock() {
-	client := lm.s.Client()
-	if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
-		panic(err)
-	}
-}
-
-// NewLocker creates a sync.Locker backed by an etcd mutex.
-func NewLocker(s *Session, pfx string) sync.Locker {
-	return &lockerMutex{NewMutex(s, pfx)}
-}
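
Similarly, the Mutex removed above serializes access via a lock key under a prefix; a minimal sketch, reusing a session created as in the previous example and assuming a placeholder prefix:

package example

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3/concurrency"
)

// lockAndRelease acquires the distributed lock under pfx and releases it again.
// TryLock (shown above) is the non-blocking variant and returns ErrLocked when
// another session already holds the lock.
func lockAndRelease(sess *concurrency.Session, pfx string) error {
	m := concurrency.NewMutex(sess, pfx)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := m.Lock(ctx); err != nil {
		return err
	}
	fmt.Println("holding", m.Key())
	return m.Unlock(context.Background())
}
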
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
deleted file mode 100644
index 97eb763..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/session.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
-	"context"
-	"time"
-
-	v3 "go.etcd.io/etcd/clientv3"
-)
-
-const defaultSessionTTL = 60
-
-// Session represents a lease kept alive for the lifetime of a client.
-// Fault-tolerant applications may use sessions to reason about liveness.
-type Session struct {
-	client *v3.Client
-	opts   *sessionOptions
-	id     v3.LeaseID
-
-	cancel context.CancelFunc
-	donec  <-chan struct{}
-}
-
-// NewSession gets the leased session for a client.
-func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
-	ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
-	for _, opt := range opts {
-		opt(ops)
-	}
-
-	id := ops.leaseID
-	if id == v3.NoLease {
-		resp, err := client.Grant(ops.ctx, int64(ops.ttl))
-		if err != nil {
-			return nil, err
-		}
-		id = resp.ID
-	}
-
-	ctx, cancel := context.WithCancel(ops.ctx)
-	keepAlive, err := client.KeepAlive(ctx, id)
-	if err != nil || keepAlive == nil {
-		cancel()
-		return nil, err
-	}
-
-	donec := make(chan struct{})
-	s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
-
-	// keep the lease alive until client error or cancelled context
-	go func() {
-		defer close(donec)
-		for range keepAlive {
-			// eat messages until keep alive channel closes
-		}
-	}()
-
-	return s, nil
-}
-
-// Client is the etcd client that is attached to the session.
-func (s *Session) Client() *v3.Client {
-	return s.client
-}
-
-// Lease is the lease ID for keys bound to the session.
-func (s *Session) Lease() v3.LeaseID { return s.id }
-
-// Done returns a channel that closes when the lease is orphaned, expires, or
-// is otherwise no longer being refreshed.
-func (s *Session) Done() <-chan struct{} { return s.donec }
-
-// Orphan ends the refresh for the session lease. This is useful
-// in case the state of the client connection is indeterminate (revoke
-// would fail) or when transferring lease ownership.
-func (s *Session) Orphan() {
-	s.cancel()
-	<-s.donec
-}
-
-// Close orphans the session and revokes the session lease.
-func (s *Session) Close() error {
-	s.Orphan()
-	// if revoke takes longer than the ttl, lease is expired anyway
-	ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
-	_, err := s.client.Revoke(ctx, s.id)
-	cancel()
-	return err
-}
-
-type sessionOptions struct {
-	ttl     int
-	leaseID v3.LeaseID
-	ctx     context.Context
-}
-
-// SessionOption configures Session.
-type SessionOption func(*sessionOptions)
-
-// WithTTL configures the session's TTL in seconds.
-// If TTL is <= 0, the default 60 seconds TTL will be used.
-func WithTTL(ttl int) SessionOption {
-	return func(so *sessionOptions) {
-		if ttl > 0 {
-			so.ttl = ttl
-		}
-	}
-}
-
-// WithLease specifies the existing leaseID to be used for the session.
-// This is useful in process restart scenario, for example, to reclaim
-// leadership from an election prior to restart.
-func WithLease(leaseID v3.LeaseID) SessionOption {
-	return func(so *sessionOptions) {
-		so.leaseID = leaseID
-	}
-}
-
-// WithContext assigns a context to the session instead of defaulting to
-// using the client context. This is useful for canceling NewSession and
-// Close operations immediately without having to close the client. If the
-// context is canceled before Close() completes, the session's lease will be
-// abandoned and left to expire instead of being revoked.
-func WithContext(ctx context.Context) SessionOption {
-	return func(so *sessionOptions) {
-		so.ctx = ctx
-	}
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go b/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go
deleted file mode 100644
index ee11510..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/concurrency/stm.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
-	"context"
-	"math"
-
-	v3 "go.etcd.io/etcd/clientv3"
-)
-
-// STM is an interface for software transactional memory.
-type STM interface {
-	// Get returns the value for a key and inserts the key in the txn's read set.
-	// If Get fails, it aborts the transaction with an error, never returning.
-	Get(key ...string) string
-	// Put adds a value for a key to the write set.
-	Put(key, val string, opts ...v3.OpOption)
-	// Rev returns the revision of a key in the read set.
-	Rev(key string) int64
-	// Del deletes a key.
-	Del(key string)
-
-	// commit attempts to apply the txn's changes to the server.
-	commit() *v3.TxnResponse
-	reset()
-}
-
-// Isolation is an enumeration of transactional isolation levels which
-// describes how transactions should interfere and conflict.
-type Isolation int
-
-const (
-	// SerializableSnapshot provides serializable isolation and also checks
-	// for write conflicts.
-	SerializableSnapshot Isolation = iota
-	// Serializable reads within the same transaction attempt return data
-	// from the at the revision of the first read.
-	Serializable
-	// RepeatableReads reads within the same transaction attempt always
-	// return the same data.
-	RepeatableReads
-	// ReadCommitted reads keys from any committed revision.
-	ReadCommitted
-)
-
-// stmError safely passes STM errors through panic to the STM error channel.
-type stmError struct{ err error }
-
-type stmOptions struct {
-	iso      Isolation
-	ctx      context.Context
-	prefetch []string
-}
-
-type stmOption func(*stmOptions)
-
-// WithIsolation specifies the transaction isolation level.
-func WithIsolation(lvl Isolation) stmOption {
-	return func(so *stmOptions) { so.iso = lvl }
-}
-
-// WithAbortContext specifies the context for permanently aborting the transaction.
-func WithAbortContext(ctx context.Context) stmOption {
-	return func(so *stmOptions) { so.ctx = ctx }
-}
-
-// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
-// If an STM transaction will unconditionally fetch a set of keys, prefetching
-// those keys will save the round-trip cost from requesting each key one by one
-// with Get().
-func WithPrefetch(keys ...string) stmOption {
-	return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
-}
-
-// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
-func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
-	opts := &stmOptions{ctx: c.Ctx()}
-	for _, f := range so {
-		f(opts)
-	}
-	if len(opts.prefetch) != 0 {
-		f := apply
-		apply = func(s STM) error {
-			s.Get(opts.prefetch...)
-			return f(s)
-		}
-	}
-	return runSTM(mkSTM(c, opts), apply)
-}
-
-func mkSTM(c *v3.Client, opts *stmOptions) STM {
-	switch opts.iso {
-	case SerializableSnapshot:
-		s := &stmSerializable{
-			stm:      stm{client: c, ctx: opts.ctx},
-			prefetch: make(map[string]*v3.GetResponse),
-		}
-		s.conflicts = func() []v3.Cmp {
-			return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
-		}
-		return s
-	case Serializable:
-		s := &stmSerializable{
-			stm:      stm{client: c, ctx: opts.ctx},
-			prefetch: make(map[string]*v3.GetResponse),
-		}
-		s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
-		return s
-	case RepeatableReads:
-		s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
-		s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
-		return s
-	case ReadCommitted:
-		s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
-		s.conflicts = func() []v3.Cmp { return nil }
-		return s
-	default:
-		panic("unsupported stm")
-	}
-}
-
-type stmResponse struct {
-	resp *v3.TxnResponse
-	err  error
-}
-
-func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
-	outc := make(chan stmResponse, 1)
-	go func() {
-		defer func() {
-			if r := recover(); r != nil {
-				e, ok := r.(stmError)
-				if !ok {
-					// client apply panicked
-					panic(r)
-				}
-				outc <- stmResponse{nil, e.err}
-			}
-		}()
-		var out stmResponse
-		for {
-			s.reset()
-			if out.err = apply(s); out.err != nil {
-				break
-			}
-			if out.resp = s.commit(); out.resp != nil {
-				break
-			}
-		}
-		outc <- out
-	}()
-	r := <-outc
-	return r.resp, r.err
-}
-
-// stm implements repeatable-read software transactional memory over etcd
-type stm struct {
-	client *v3.Client
-	ctx    context.Context
-	// rset holds read key values and revisions
-	rset readSet
-	// wset holds overwritten keys and their values
-	wset writeSet
-	// getOpts are the opts used for gets
-	getOpts []v3.OpOption
-	// conflicts computes the current conflicts on the txn
-	conflicts func() []v3.Cmp
-}
-
-type stmPut struct {
-	val string
-	op  v3.Op
-}
-
-type readSet map[string]*v3.GetResponse
-
-func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
-	for i, resp := range txnresp.Responses {
-		rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
-	}
-}
-
-// first returns the store revision from the first fetch
-func (rs readSet) first() int64 {
-	ret := int64(math.MaxInt64 - 1)
-	for _, resp := range rs {
-		if rev := resp.Header.Revision; rev < ret {
-			ret = rev
-		}
-	}
-	return ret
-}
-
-// cmps guards the txn from updates to read set
-func (rs readSet) cmps() []v3.Cmp {
-	cmps := make([]v3.Cmp, 0, len(rs))
-	for k, rk := range rs {
-		cmps = append(cmps, isKeyCurrent(k, rk))
-	}
-	return cmps
-}
-
-type writeSet map[string]stmPut
-
-func (ws writeSet) get(keys ...string) *stmPut {
-	for _, key := range keys {
-		if wv, ok := ws[key]; ok {
-			return &wv
-		}
-	}
-	return nil
-}
-
-// cmps returns a cmp list testing no writes have happened past rev
-func (ws writeSet) cmps(rev int64) []v3.Cmp {
-	cmps := make([]v3.Cmp, 0, len(ws))
-	for key := range ws {
-		cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
-	}
-	return cmps
-}
-
-// puts is the list of ops for all pending writes
-func (ws writeSet) puts() []v3.Op {
-	puts := make([]v3.Op, 0, len(ws))
-	for _, v := range ws {
-		puts = append(puts, v.op)
-	}
-	return puts
-}
-
-func (s *stm) Get(keys ...string) string {
-	if wv := s.wset.get(keys...); wv != nil {
-		return wv.val
-	}
-	return respToValue(s.fetch(keys...))
-}
-
-func (s *stm) Put(key, val string, opts ...v3.OpOption) {
-	s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
-}
-
-func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
-
-func (s *stm) Rev(key string) int64 {
-	if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
-		return resp.Kvs[0].ModRevision
-	}
-	return 0
-}
-
-func (s *stm) commit() *v3.TxnResponse {
-	txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
-	if err != nil {
-		panic(stmError{err})
-	}
-	if txnresp.Succeeded {
-		return txnresp
-	}
-	return nil
-}
-
-func (s *stm) fetch(keys ...string) *v3.GetResponse {
-	if len(keys) == 0 {
-		return nil
-	}
-	ops := make([]v3.Op, len(keys))
-	for i, key := range keys {
-		if resp, ok := s.rset[key]; ok {
-			return resp
-		}
-		ops[i] = v3.OpGet(key, s.getOpts...)
-	}
-	txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
-	if err != nil {
-		panic(stmError{err})
-	}
-	s.rset.add(keys, txnresp)
-	return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
-}
-
-func (s *stm) reset() {
-	s.rset = make(map[string]*v3.GetResponse)
-	s.wset = make(map[string]stmPut)
-}
-
-type stmSerializable struct {
-	stm
-	prefetch map[string]*v3.GetResponse
-}
-
-func (s *stmSerializable) Get(keys ...string) string {
-	if wv := s.wset.get(keys...); wv != nil {
-		return wv.val
-	}
-	firstRead := len(s.rset) == 0
-	for _, key := range keys {
-		if resp, ok := s.prefetch[key]; ok {
-			delete(s.prefetch, key)
-			s.rset[key] = resp
-		}
-	}
-	resp := s.stm.fetch(keys...)
-	if firstRead {
-		// txn's base revision is defined by the first read
-		s.getOpts = []v3.OpOption{
-			v3.WithRev(resp.Header.Revision),
-			v3.WithSerializable(),
-		}
-	}
-	return respToValue(resp)
-}
-
-func (s *stmSerializable) Rev(key string) int64 {
-	s.Get(key)
-	return s.stm.Rev(key)
-}
-
-func (s *stmSerializable) gets() ([]string, []v3.Op) {
-	keys := make([]string, 0, len(s.rset))
-	ops := make([]v3.Op, 0, len(s.rset))
-	for k := range s.rset {
-		keys = append(keys, k)
-		ops = append(ops, v3.OpGet(k))
-	}
-	return keys, ops
-}
-
-func (s *stmSerializable) commit() *v3.TxnResponse {
-	keys, getops := s.gets()
-	txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
-	// use Else to prefetch keys in case of conflict to save a round trip
-	txnresp, err := txn.Else(getops...).Commit()
-	if err != nil {
-		panic(stmError{err})
-	}
-	if txnresp.Succeeded {
-		return txnresp
-	}
-	// load prefetch with Else data
-	s.rset.add(keys, txnresp)
-	s.prefetch = s.rset
-	s.getOpts = nil
-	return nil
-}
-
-func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
-	if len(r.Kvs) != 0 {
-		return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
-	}
-	return v3.Compare(v3.ModRevision(k), "=", 0)
-}
-
-func respToValue(resp *v3.GetResponse) string {
-	if resp == nil || len(resp.Kvs) == 0 {
-		return ""
-	}
-	return string(resp.Kvs[0].Value)
-}
-
-// NewSTMRepeatable is deprecated.
-func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
-	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
-}
-
-// NewSTMSerializable is deprecated.
-func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
-	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
-}
-
-// NewSTMReadCommitted is deprecated.
-func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
-	return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
-}
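
The STM helpers deleted above wrap a retry loop (runSTM) that re-applies the callback until it commits without a read/write conflict. A minimal usage sketch built only from the APIs shown in the deleted file; the endpoint and key names are illustrative assumptions:

    package main

    import (
        "context"
        "fmt"
        "time"

        "go.etcd.io/etcd/clientv3"
        "go.etcd.io/etcd/clientv3/concurrency"
    )

    func main() {
        // Illustrative endpoint and keys; not taken from this repository.
        cli, err := clientv3.New(clientv3.Config{
            Endpoints:   []string{"localhost:2379"},
            DialTimeout: 5 * time.Second,
        })
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        // Atomically copy "src" to "dst". WithPrefetch avoids a per-key round trip,
        // and WithAbortContext bounds how long the retry loop may run.
        _, err = concurrency.NewSTM(cli,
            func(s concurrency.STM) error {
                s.Put("dst", s.Get("src"))
                return nil
            },
            concurrency.WithAbortContext(ctx),
            concurrency.WithIsolation(concurrency.SerializableSnapshot),
            concurrency.WithPrefetch("src"),
        )
        if err != nil {
            fmt.Println("stm aborted:", err)
        }
    }

The package also disappears from vendor/modules.txt below, consistent with it no longer being imported by this module.
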
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e6457e8..7ff3d91 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -65,23 +65,22 @@
 ## explicit
 github.com/opencord/omci-lib-go
 github.com/opencord/omci-lib-go/generated
-# github.com/opencord/voltha-lib-go/v4 v4.3.4
+# github.com/opencord/voltha-lib-go/v5 v5.0.2
 ## explicit
-github.com/opencord/voltha-lib-go/v4/pkg/adapters
-github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif
-github.com/opencord/voltha-lib-go/v4/pkg/adapters/common
-github.com/opencord/voltha-lib-go/v4/pkg/config
-github.com/opencord/voltha-lib-go/v4/pkg/db
-github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore
-github.com/opencord/voltha-lib-go/v4/pkg/events
-github.com/opencord/voltha-lib-go/v4/pkg/events/eventif
-github.com/opencord/voltha-lib-go/v4/pkg/flows
-github.com/opencord/voltha-lib-go/v4/pkg/kafka
-github.com/opencord/voltha-lib-go/v4/pkg/log
-github.com/opencord/voltha-lib-go/v4/pkg/probe
-github.com/opencord/voltha-lib-go/v4/pkg/techprofile
-github.com/opencord/voltha-lib-go/v4/pkg/version
-# github.com/opencord/voltha-protos/v4 v4.1.10
+github.com/opencord/voltha-lib-go/v5/pkg/adapters
+github.com/opencord/voltha-lib-go/v5/pkg/adapters/adapterif
+github.com/opencord/voltha-lib-go/v5/pkg/adapters/common
+github.com/opencord/voltha-lib-go/v5/pkg/config
+github.com/opencord/voltha-lib-go/v5/pkg/db
+github.com/opencord/voltha-lib-go/v5/pkg/db/kvstore
+github.com/opencord/voltha-lib-go/v5/pkg/events
+github.com/opencord/voltha-lib-go/v5/pkg/events/eventif
+github.com/opencord/voltha-lib-go/v5/pkg/flows
+github.com/opencord/voltha-lib-go/v5/pkg/kafka
+github.com/opencord/voltha-lib-go/v5/pkg/log
+github.com/opencord/voltha-lib-go/v5/pkg/probe
+github.com/opencord/voltha-lib-go/v5/pkg/version
+# github.com/opencord/voltha-protos/v4 v4.2.0
 ## explicit
 github.com/opencord/voltha-protos/v4/go/common
 github.com/opencord/voltha-protos/v4/go/ext/config
@@ -136,7 +135,6 @@
 go.etcd.io/etcd/clientv3/balancer/connectivity
 go.etcd.io/etcd/clientv3/balancer/picker
 go.etcd.io/etcd/clientv3/balancer/resolver/endpoint
-go.etcd.io/etcd/clientv3/concurrency
 go.etcd.io/etcd/clientv3/credentials
 go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes
 go.etcd.io/etcd/etcdserver/etcdserverpb
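
For completeness, the module versions recorded above correspond to go.mod require entries along these lines; only the two versions visible in this diff are shown, and the module header and other requirements are omitted:

    require (
        github.com/opencord/voltha-lib-go/v5 v5.0.2
        github.com/opencord/voltha-protos/v4 v4.2.0
    )
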