VOL-2017: move library code to voltha-lib-go

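The shared packages that used to live under common/ in this repo (log, probe,
ponresourcemanager, techprofile, ...) are now consumed from
github.com/opencord/voltha-lib-go/pkg, so their sources and tests are deleted
here and imports are updated to the new paths.

Build changes:
  - new local-lib-go Makefile target: when LOCAL_LIB_GO is set, it copies a
    local checkout from ${GOPATH}/src/github.com/opencord/voltha-lib-go/pkg
    into vendor/, so images and tests can be built against un-merged library
    changes; rw_core, ro_core and test now depend on it.
  - new mod-update target wrapping "go mod tidy" and "go mod vendor".

For example, assuming voltha-lib-go is cloned under
${GOPATH}/src/github.com/opencord (any non-empty value of LOCAL_LIB_GO works):

    LOCAL_LIB_GO=1 make docker-build
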
Change-Id: Ia4b27695cf7ec3377a64572972998d52b70a0a5f
diff --git a/Makefile b/Makefile
index d87af77..649b10b 100644
--- a/Makefile
+++ b/Makefile
@@ -96,6 +96,13 @@
 	cp ../voltha-protos/dist/*.tar.gz python/local_imports/voltha-protos/dist/
 endif
 
+## Local Development Helpers
+local-lib-go:
+ifdef LOCAL_LIB_GO
+	mkdir -p vendor/github.com/opencord/voltha-lib-go/pkg
+	cp -r ${GOPATH}/src/github.com/opencord/voltha-lib-go/pkg/* vendor/github.com/opencord/voltha-lib-go/pkg/
+endif
+
 local-pyvoltha:
 	@mkdir -p python/local_imports
 ifdef LOCAL_PYVOLTHA
@@ -128,10 +135,10 @@
 
 docker-build: rw_core ro_core ofagent cli
 
-rw_core: local-protos
+rw_core: local-protos local-lib-go
 	docker build $(DOCKER_BUILD_ARGS) -t ${RWCORE_IMAGENAME}:${DOCKER_TAG} -t ${RWCORE_IMAGENAME}:latest -f docker/Dockerfile.rw_core .
 
-ro_core: local-protos
+ro_core: local-protos local-lib-go
 	docker build $(DOCKER_BUILD_ARGS) -t ${ROCORE_IMAGENAME}:${DOCKER_TAG} -t ${ROCORE_IMAGENAME}:latest -f docker/Dockerfile.ro_core .
 
 ofagent: local-protos local-pyvoltha
@@ -222,7 +229,7 @@
 	@mkdir -p ./sca-report
 	$(GOLANGCI_LINT_TOOL) run --out-format junit-xml ./... 2>&1 | tee ./sca-report/sca-report.xml
 
-test: go_junit_install gocover_cobertura_install
+test: go_junit_install gocover_cobertura_install local-lib-go
 	@mkdir -p ./tests/results
 	@go test -mod=vendor -v -coverprofile ./tests/results/go-test-coverage.out -covermode count ./... 2>&1 | tee ./tests/results/go-test-results.out ;\
 	RETURN=$$? ;\
@@ -237,4 +244,8 @@
 distclean: clean
 	rm -rf ${VENVDIR} ./sca_report
 
+mod-update:
+	go mod tidy
+	go mod vendor
+
 # end file
diff --git a/common/core/northbound/grpc/default_api_handler.go b/common/core/northbound/grpc/default_api_handler.go
index 6ac7599..2d374eb 100644
--- a/common/core/northbound/grpc/default_api_handler.go
+++ b/common/core/northbound/grpc/default_api_handler.go
@@ -19,7 +19,7 @@
 	"context"
 	"errors"
 	"github.com/golang/protobuf/ptypes/empty"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/common"
 	"github.com/opencord/voltha-protos/go/openflow_13"
 	"github.com/opencord/voltha-protos/go/voltha"
diff --git a/common/log/log_test.go b/common/log/log_test.go
deleted file mode 100644
index 88794b2..0000000
--- a/common/log/log_test.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package log
-
-import (
-	"github.com/stretchr/testify/assert"
-	"google.golang.org/grpc/grpclog"
-	"testing"
-)
-
-/*
-Prerequite:  Start the kafka/zookeeper containers.
-*/
-
-var testLogger Logger
-
-func TestInit(t *testing.T) {
-	var err error
-	testLogger, err = AddPackage(JSON, ErrorLevel, nil)
-	assert.NotNil(t, testLogger)
-	assert.Nil(t, err)
-}
-
-func verifyLogLevel(t *testing.T, minimumLevel int) {
-	SetAllLogLevel(minimumLevel)
-	var success bool
-	for i := 0; i < 6; i++ {
-		success = testLogger.V(i)
-		if i == 1 && minimumLevel == 2 {
-			// TODO: Update the test when a new version of Zap logger is available.  It has a bug with that
-			// specific combination
-			continue
-		}
-		if i < minimumLevel {
-			assert.False(t, success)
-		} else {
-			assert.True(t, success)
-		}
-	}
-}
-
-func TestLogLevelDebug(t *testing.T) {
-	for i := 0; i < 6; i++ {
-		verifyLogLevel(t, i)
-	}
-}
-
-func TestUpdateAllLoggers(t *testing.T) {
-	err := UpdateAllLoggers(Fields{"update": "update"})
-	assert.Nil(t, err)
-}
-
-func TestUpdateLoggers(t *testing.T) {
-	testLogger, err := UpdateLogger(Fields{"update": "update"})
-	assert.Nil(t, err)
-	assert.NotNil(t, testLogger)
-}
-
-func TestUseAsGrpcLoggerV2(t *testing.T) {
-	var grpcLogger grpclog.LoggerV2
-	thisLogger, _ := AddPackage(JSON, ErrorLevel, nil)
-	grpcLogger = thisLogger
-	assert.NotNil(t, grpcLogger)
-}
-
-func TestUpdateLogLevel(t *testing.T) {
-	//	Let's create a bunch of logger each with a separate package
-	myLoggers := make(map[string]Logger)
-	pkgNames := []string{"/rw_core/core", "/db/model", "/kafka"}
-	for _, name := range pkgNames {
-		myLoggers[name], _ = AddPackage(JSON, ErrorLevel, nil, []string{name}...)
-	}
-	//Test updates to log levels
-	levels := []int{0, 1, 2, 3, 4, 5}
-	for _, expectedLevel := range levels {
-		for _, name := range pkgNames {
-			SetPackageLogLevel(name, expectedLevel)
-			l, err := GetPackageLogLevel(name)
-			assert.Nil(t, err)
-			assert.Equal(t, l, expectedLevel)
-		}
-	}
-	//Test set all package level
-	for _, expectedLevel := range levels {
-		SetAllLogLevel(expectedLevel)
-		for _, name := range pkgNames {
-			l, err := GetPackageLogLevel(name)
-			assert.Nil(t, err)
-			assert.Equal(t, l, expectedLevel)
-		}
-	}
-}
diff --git a/common/ponresourcemanager/ponresourcemanager.go b/common/ponresourcemanager/ponresourcemanager.go
deleted file mode 100755
index 1544b8d..0000000
--- a/common/ponresourcemanager/ponresourcemanager.go
+++ /dev/null
@@ -1,1180 +0,0 @@
-/*
- * Copyright 2019-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ponresourcemanager
-
-import (
-	"encoding/base64"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"strconv"
-
-	bitmap "github.com/boljen/go-bitmap"
-	"github.com/opencord/voltha-go/common/log"
-	tp "github.com/opencord/voltha-go/common/techprofile"
-	"github.com/opencord/voltha-go/db/kvstore"
-	"github.com/opencord/voltha-go/db/model"
-)
-
-const (
-	//Constants to identify resource pool
-	UNI_ID     = "UNI_ID"
-	ONU_ID     = "ONU_ID"
-	ALLOC_ID   = "ALLOC_ID"
-	GEMPORT_ID = "GEMPORT_ID"
-	FLOW_ID    = "FLOW_ID"
-
-	//Constants for passing command line arugments
-	OLT_MODEL_ARG = "--olt_model"
-	PATH_PREFIX   = "service/voltha/resource_manager/{%s}"
-	/*The resource ranges for a given device model should be placed
-	  at 'resource_manager/<technology>/resource_ranges/<olt_model_type>'
-	  path on the KV store.
-	  If Resource Range parameters are to be read from the external KV store,
-	  they are expected to be stored in the following format.
-	  Note: All parameters are MANDATORY for now.
-	  constants used as keys to reference the resource range parameters from
-	  and external KV store.
-	*/
-	UNI_ID_START_IDX      = "uni_id_start"
-	UNI_ID_END_IDX        = "uni_id_end"
-	ONU_ID_START_IDX      = "onu_id_start"
-	ONU_ID_END_IDX        = "onu_id_end"
-	ONU_ID_SHARED_IDX     = "onu_id_shared"
-	ALLOC_ID_START_IDX    = "alloc_id_start"
-	ALLOC_ID_END_IDX      = "alloc_id_end"
-	ALLOC_ID_SHARED_IDX   = "alloc_id_shared"
-	GEMPORT_ID_START_IDX  = "gemport_id_start"
-	GEMPORT_ID_END_IDX    = "gemport_id_end"
-	GEMPORT_ID_SHARED_IDX = "gemport_id_shared"
-	FLOW_ID_START_IDX     = "flow_id_start"
-	FLOW_ID_END_IDX       = "flow_id_end"
-	FLOW_ID_SHARED_IDX    = "flow_id_shared"
-	NUM_OF_PON_PORT       = "pon_ports"
-
-	/*
-	   The KV store backend is initialized with a path prefix and we need to
-	   provide only the suffix.
-	*/
-	PON_RESOURCE_RANGE_CONFIG_PATH = "resource_ranges/%s"
-
-	//resource path suffix
-	//Path on the KV store for storing alloc id ranges and resource pool for a given interface
-	//Format: <device_id>/alloc_id_pool/<pon_intf_id>
-	ALLOC_ID_POOL_PATH = "{%s}/alloc_id_pool/{%d}"
-	//Path on the KV store for storing gemport id ranges and resource pool for a given interface
-	//Format: <device_id>/gemport_id_pool/<pon_intf_id>
-	GEMPORT_ID_POOL_PATH = "{%s}/gemport_id_pool/{%d}"
-	//Path on the KV store for storing onu id ranges and resource pool for a given interface
-	//Format: <device_id>/onu_id_pool/<pon_intf_id>
-	ONU_ID_POOL_PATH = "{%s}/onu_id_pool/{%d}"
-	//Path on the KV store for storing flow id ranges and resource pool for a given interface
-	//Format: <device_id>/flow_id_pool/<pon_intf_id>
-	FLOW_ID_POOL_PATH = "{%s}/flow_id_pool/{%d}"
-
-	//Path on the KV store for storing list of alloc IDs for a given ONU
-	//Format: <device_id>/<(pon_intf_id, onu_id)>/alloc_ids
-	ALLOC_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/alloc_ids"
-
-	//Path on the KV store for storing list of gemport IDs for a given ONU
-	//Format: <device_id>/<(pon_intf_id, onu_id)>/gemport_ids
-	GEMPORT_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/gemport_ids"
-
-	//Path on the KV store for storing list of Flow IDs for a given ONU
-	//Format: <device_id>/<(pon_intf_id, onu_id)>/flow_ids
-	FLOW_ID_RESOURCE_MAP_PATH = "{%s}/{%s}/flow_ids"
-
-	//Flow Id info: Use to store more metadata associated with the flow_id
-	//Format: <device_id>/<(pon_intf_id, onu_id)>/flow_id_info/<flow_id>
-	FLOW_ID_INFO_PATH = "{%s}/{%s}/flow_id_info/{%d}"
-
-	//Constants for internal usage.
-	PON_INTF_ID     = "pon_intf_id"
-	START_IDX       = "start_idx"
-	END_IDX         = "end_idx"
-	POOL            = "pool"
-	NUM_OF_PON_INTF = 16
-
-	KVSTORE_RETRY_TIMEOUT = 5
-)
-
-//type ResourceTypeIndex string
-//type ResourceType string
-
-type PONResourceManager struct {
-	//Implements APIs to initialize/allocate/release alloc/gemport/onu IDs.
-	Technology     string
-	DeviceType     string
-	DeviceID       string
-	Backend        string // ETCD, or consul
-	Host           string // host ip of the KV store
-	Port           int    // port number for the KV store
-	OLTModel       string
-	KVStore        *model.Backend
-	TechProfileMgr tp.TechProfileIf // create object of *tp.TechProfileMgr
-
-	// Below attribute, pon_resource_ranges, should be initialized
-	// by reading from KV store.
-	PonResourceRanges  map[string]interface{}
-	SharedResourceMgrs map[string]*PONResourceManager
-	SharedIdxByType    map[string]string
-	IntfIDs            []uint32 // list of pon interface IDs
-	Globalorlocal      string
-}
-
-func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
-	log.Infow("kv-store-type", log.Fields{"store": storeType})
-	switch storeType {
-	case "consul":
-		return kvstore.NewConsulClient(address, timeout)
-	case "etcd":
-		return kvstore.NewEtcdClient(address, timeout)
-	}
-	return nil, errors.New("unsupported-kv-store")
-}
-
-func SetKVClient(Technology string, Backend string, Host string, Port int) *model.Backend {
-	addr := Host + ":" + strconv.Itoa(Port)
-	// TODO : Make sure direct call to NewBackend is working fine with backend , currently there is some
-	// issue between kv store and backend , core is not calling NewBackend directly
-	kvClient, err := newKVClient(Backend, addr, KVSTORE_RETRY_TIMEOUT)
-	if err != nil {
-		log.Fatalw("Failed to init KV client\n", log.Fields{"err": err})
-		return nil
-	}
-	kvbackend := &model.Backend{
-		Client:     kvClient,
-		StoreType:  Backend,
-		Host:       Host,
-		Port:       Port,
-		Timeout:    KVSTORE_RETRY_TIMEOUT,
-		PathPrefix: fmt.Sprintf(PATH_PREFIX, Technology)}
-
-	return kvbackend
-}
-
-// NewPONResourceManager creates a new PON resource manager.
-func NewPONResourceManager(Technology string, DeviceType string, DeviceID string, Backend string, Host string, Port int) (*PONResourceManager, error) {
-	var PONMgr PONResourceManager
-	PONMgr.Technology = Technology
-	PONMgr.DeviceType = DeviceType
-	PONMgr.DeviceID = DeviceID
-	PONMgr.Backend = Backend
-	PONMgr.Host = Host
-	PONMgr.Port = Port
-	PONMgr.KVStore = SetKVClient(Technology, Backend, Host, Port)
-	if PONMgr.KVStore == nil {
-		log.Error("KV Client initilization failed")
-		return nil, errors.New("Failed to init KV client")
-	}
-	// Initialize techprofile for this technology
-	if PONMgr.TechProfileMgr, _ = tp.NewTechProfile(&PONMgr, Backend, Host, Port); PONMgr.TechProfileMgr == nil {
-		log.Error("Techprofile initialization failed")
-		return nil, errors.New("Failed to init tech profile")
-	}
-	PONMgr.PonResourceRanges = make(map[string]interface{})
-	PONMgr.SharedResourceMgrs = make(map[string]*PONResourceManager)
-	PONMgr.SharedIdxByType = make(map[string]string)
-	PONMgr.SharedIdxByType[ONU_ID] = ONU_ID_SHARED_IDX
-	PONMgr.SharedIdxByType[ALLOC_ID] = ALLOC_ID_SHARED_IDX
-	PONMgr.SharedIdxByType[GEMPORT_ID] = GEMPORT_ID_SHARED_IDX
-	PONMgr.SharedIdxByType[FLOW_ID] = FLOW_ID_SHARED_IDX
-	PONMgr.IntfIDs = make([]uint32, NUM_OF_PON_INTF)
-	PONMgr.OLTModel = DeviceType
-	return &PONMgr, nil
-}
-
-/*
-  Initialize PON resource ranges with config fetched from kv store.
-  return boolean: True if PON resource ranges initialized else false
-  Try to initialize the PON Resource Ranges from KV store based on the
-  OLT model key, if available
-*/
-
-func (PONRMgr *PONResourceManager) InitResourceRangesFromKVStore() bool {
-	//Initialize PON resource ranges with config fetched from kv store.
-	//:return boolean: True if PON resource ranges initialized else false
-	// Try to initialize the PON Resource Ranges from KV store based on the
-	// OLT model key, if available
-	if PONRMgr.OLTModel == "" {
-		log.Error("Failed to get OLT model")
-		return false
-	}
-	Path := fmt.Sprintf(PON_RESOURCE_RANGE_CONFIG_PATH, PONRMgr.OLTModel)
-	//get resource from kv store
-	Result, err := PONRMgr.KVStore.Get(Path)
-	if err != nil {
-		log.Debugf("Error in fetching resource %s from KV strore", Path)
-		return false
-	}
-	if Result == nil {
-		log.Debug("There may be no resources in the KV store in case of fresh bootup, return true")
-		return false
-	}
-	//update internal ranges from kv ranges. If there are missing
-	// values in the KV profile, continue to use the defaults
-	Value, err := ToByte(Result.Value)
-	if err != nil {
-		log.Error("Failed to convert kvpair to byte string")
-		return false
-	}
-	if err := json.Unmarshal(Value, &PONRMgr.PonResourceRanges); err != nil {
-		log.Error("Failed to Unmarshal json byte")
-		return false
-	}
-	log.Debug("Init resource ranges from kvstore success")
-	return true
-}
-
-func (PONRMgr *PONResourceManager) UpdateRanges(StartIDx string, StartID uint32, EndIDx string, EndID uint32,
-	SharedIDx string, SharedPoolID uint32, RMgr *PONResourceManager) {
-	/*
-	   Update the ranges for all reosurce type in the intermnal maps
-	   param: resource type start index
-	   param: start ID
-	   param: resource type end index
-	   param: end ID
-	   param: resource type shared index
-	   param: shared pool id
-	   param: global resource manager
-	*/
-	log.Debugf("update ranges for %s, %d", StartIDx, StartID)
-
-	if StartID != 0 {
-		if (PONRMgr.PonResourceRanges[StartIDx] == nil) || (PONRMgr.PonResourceRanges[StartIDx].(uint32) < StartID) {
-			PONRMgr.PonResourceRanges[StartIDx] = StartID
-		}
-	}
-	if EndID != 0 {
-		if (PONRMgr.PonResourceRanges[EndIDx] == nil) || (PONRMgr.PonResourceRanges[EndIDx].(uint32) > EndID) {
-			PONRMgr.PonResourceRanges[EndIDx] = EndID
-		}
-	}
-	//if SharedPoolID != 0 {
-	PONRMgr.PonResourceRanges[SharedIDx] = SharedPoolID
-	//}
-	if RMgr != nil {
-		PONRMgr.SharedResourceMgrs[SharedIDx] = RMgr
-	}
-}
-
-func (PONRMgr *PONResourceManager) InitDefaultPONResourceRanges(ONUIDStart uint32,
-	ONUIDEnd uint32,
-	ONUIDSharedPoolID uint32,
-	AllocIDStart uint32,
-	AllocIDEnd uint32,
-	AllocIDSharedPoolID uint32,
-	GEMPortIDStart uint32,
-	GEMPortIDEnd uint32,
-	GEMPortIDSharedPoolID uint32,
-	FlowIDStart uint32,
-	FlowIDEnd uint32,
-	FlowIDSharedPoolID uint32,
-	UNIIDStart uint32,
-	UNIIDEnd uint32,
-	NoOfPONPorts uint32,
-	IntfIDs []uint32) bool {
-
-	/*Initialize default PON resource ranges
-
-	  :param onu_id_start_idx: onu id start index
-	  :param onu_id_end_idx: onu id end index
-	  :param onu_id_shared_pool_id: pool idx for id shared by all intfs or None for no sharing
-	  :param alloc_id_start_idx: alloc id start index
-	  :param alloc_id_end_idx: alloc id end index
-	  :param alloc_id_shared_pool_id: pool idx for alloc id shared by all intfs or None for no sharing
-	  :param gemport_id_start_idx: gemport id start index
-	  :param gemport_id_end_idx: gemport id end index
-	  :param gemport_id_shared_pool_id: pool idx for gemport id shared by all intfs or None for no sharing
-	  :param flow_id_start_idx: flow id start index
-	  :param flow_id_end_idx: flow id end index
-	  :param flow_id_shared_pool_id: pool idx for flow id shared by all intfs or None for no sharing
-	  :param num_of_pon_ports: number of PON ports
-	  :param intf_ids: interfaces serviced by this manager
-	*/
-	PONRMgr.UpdateRanges(ONU_ID_START_IDX, ONUIDStart, ONU_ID_END_IDX, ONUIDEnd, ONU_ID_SHARED_IDX, ONUIDSharedPoolID, nil)
-	PONRMgr.UpdateRanges(ALLOC_ID_START_IDX, AllocIDStart, ALLOC_ID_END_IDX, AllocIDEnd, ALLOC_ID_SHARED_IDX, AllocIDSharedPoolID, nil)
-	PONRMgr.UpdateRanges(GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
-	PONRMgr.UpdateRanges(FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
-	PONRMgr.UpdateRanges(UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
-	log.Debug("Initialize default range values")
-	var i uint32
-	if IntfIDs == nil {
-		for i = 0; i < NoOfPONPorts; i++ {
-			PONRMgr.IntfIDs = append(PONRMgr.IntfIDs, i)
-		}
-	} else {
-		PONRMgr.IntfIDs = IntfIDs
-	}
-	return true
-}
-
-func (PONRMgr *PONResourceManager) InitDeviceResourcePool() error {
-
-	//Initialize resource pool for all PON ports.
-
-	log.Debug("Init resource ranges")
-
-	var err error
-	for _, Intf := range PONRMgr.IntfIDs {
-		SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
-		if SharedPoolID != 0 {
-			Intf = SharedPoolID
-		}
-		if err = PONRMgr.InitResourceIDPool(Intf, ONU_ID,
-			PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
-			PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
-			log.Error("Failed to init ONU ID resource pool")
-			return err
-		}
-		if SharedPoolID != 0 {
-			break
-		}
-	}
-
-	for _, Intf := range PONRMgr.IntfIDs {
-		SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
-		if SharedPoolID != 0 {
-			Intf = SharedPoolID
-		}
-		if err = PONRMgr.InitResourceIDPool(Intf, ALLOC_ID,
-			PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
-			PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
-			log.Error("Failed to init ALLOC ID resource pool ")
-			return err
-		}
-		if SharedPoolID != 0 {
-			break
-		}
-	}
-	for _, Intf := range PONRMgr.IntfIDs {
-		SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
-		if SharedPoolID != 0 {
-			Intf = SharedPoolID
-		}
-		if err = PONRMgr.InitResourceIDPool(Intf, GEMPORT_ID,
-			PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
-			PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
-			log.Error("Failed to init GEMPORT ID resource pool")
-			return err
-		}
-		if SharedPoolID != 0 {
-			break
-		}
-	}
-
-	for _, Intf := range PONRMgr.IntfIDs {
-		SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
-		if SharedPoolID != 0 {
-			Intf = SharedPoolID
-		}
-		if err = PONRMgr.InitResourceIDPool(Intf, FLOW_ID,
-			PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
-			PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
-			log.Error("Failed to init FLOW ID resource pool")
-			return err
-		}
-		if SharedPoolID != 0 {
-			break
-		}
-	}
-	return err
-}
-
-func (PONRMgr *PONResourceManager) ClearDeviceResourcePool() error {
-
-	//Clear resource pool for all PON ports.
-
-	log.Debug("Clear resource ranges")
-
-	for _, Intf := range PONRMgr.IntfIDs {
-		SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
-		if SharedPoolID != 0 {
-			Intf = SharedPoolID
-		}
-		if status := PONRMgr.ClearResourceIDPool(Intf, ONU_ID); status != true {
-			log.Error("Failed to clear ONU ID resource pool")
-			return errors.New("Failed to clear ONU ID resource pool")
-		}
-		if SharedPoolID != 0 {
-			break
-		}
-	}
-
-	for _, Intf := range PONRMgr.IntfIDs {
-		SharedPoolID := PONRMgr.PonResourceRanges[ALLOC_ID_SHARED_IDX].(uint32)
-		if SharedPoolID != 0 {
-			Intf = SharedPoolID
-		}
-		if status := PONRMgr.ClearResourceIDPool(Intf, ALLOC_ID); status != true {
-			log.Error("Failed to clear ALLOC ID resource pool ")
-			return errors.New("Failed to clear ALLOC ID resource pool")
-		}
-		if SharedPoolID != 0 {
-			break
-		}
-	}
-	for _, Intf := range PONRMgr.IntfIDs {
-		SharedPoolID := PONRMgr.PonResourceRanges[GEMPORT_ID_SHARED_IDX].(uint32)
-		if SharedPoolID != 0 {
-			Intf = SharedPoolID
-		}
-		if status := PONRMgr.ClearResourceIDPool(Intf, GEMPORT_ID); status != true {
-			log.Error("Failed to clear GEMPORT ID resource pool")
-			return errors.New("Failed to clear GEMPORT ID resource pool")
-		}
-		if SharedPoolID != 0 {
-			break
-		}
-	}
-
-	for _, Intf := range PONRMgr.IntfIDs {
-		SharedPoolID := PONRMgr.PonResourceRanges[FLOW_ID_SHARED_IDX].(uint32)
-		if SharedPoolID != 0 {
-			Intf = SharedPoolID
-		}
-		if status := PONRMgr.ClearResourceIDPool(Intf, FLOW_ID); status != true {
-			log.Error("Failed to clear FLOW ID resource pool")
-			return errors.New("Failed to clear FLOW ID resource pool")
-		}
-		if SharedPoolID != 0 {
-			break
-		}
-	}
-	return nil
-}
-
-func (PONRMgr *PONResourceManager) InitResourceIDPool(Intf uint32, ResourceType string, StartID uint32, EndID uint32) error {
-
-	/*Initialize Resource ID pool for a given Resource Type on a given PON Port
-
-	  :param pon_intf_id: OLT PON interface id
-	  :param resource_type: String to identify type of resource
-	  :param start_idx: start index for onu id pool
-	  :param end_idx: end index for onu id pool
-	  :return boolean: True if resource id pool initialized else false
-	*/
-
-	// delegate to the master instance if sharing enabled across instances
-	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
-	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
-		return SharedResourceMgr.InitResourceIDPool(Intf, ResourceType, StartID, EndID)
-	}
-
-	Path := PONRMgr.GetPath(Intf, ResourceType)
-	if Path == "" {
-		log.Errorf("Failed to get path for resource type %s", ResourceType)
-		return errors.New(fmt.Sprintf("Failed to get path for resource type %s", ResourceType))
-	}
-
-	//In case of adapter reboot and reconciliation resource in kv store
-	//checked for its presence if not kv store update happens
-	Res, err := PONRMgr.GetResource(Path)
-	if (err == nil) && (Res != nil) {
-		log.Debugf("Resource %s already present in store ", Path)
-		return nil
-	} else {
-		FormatResult, err := PONRMgr.FormatResource(Intf, StartID, EndID)
-		if err != nil {
-			log.Errorf("Failed to format resource")
-			return err
-		}
-		// Add resource as json in kv store.
-		err = PONRMgr.KVStore.Put(Path, FormatResult)
-		if err == nil {
-			log.Debug("Successfuly posted to kv store")
-			return err
-		}
-	}
-
-	log.Debug("Error initializing pool")
-
-	return err
-}
-
-func (PONRMgr *PONResourceManager) FormatResource(IntfID uint32, StartIDx uint32, EndIDx uint32) ([]byte, error) {
-	/*
-	   Format resource as json.
-	   :param pon_intf_id: OLT PON interface id
-	   :param start_idx: start index for id pool
-	   :param end_idx: end index for id pool
-	   :return dictionary: resource formatted as map
-	*/
-	// Format resource as json to be stored in backend store
-	Resource := make(map[string]interface{})
-	Resource[PON_INTF_ID] = IntfID
-	Resource[START_IDX] = StartIDx
-	Resource[END_IDX] = EndIDx
-	/*
-	   Resource pool stored in backend store as binary string.
-	   Tracking the resource allocation will be done by setting the bits \
-	   in the byte array. The index set will be the resource number allocated.
-	*/
-	var TSData *bitmap.Threadsafe
-	if TSData = bitmap.NewTS(int(EndIDx)); TSData == nil {
-		log.Error("Failed to create a bitmap")
-		return nil, errors.New("Failed to create bitmap")
-	}
-	Resource[POOL] = TSData.Data(false) //we pass false so as the TSData lib api does not do a copy of the data and return
-
-	Value, err := json.Marshal(Resource)
-	if err != nil {
-		log.Errorf("Failed to marshall resource")
-		return nil, err
-	}
-	return Value, err
-}
-func (PONRMgr *PONResourceManager) GetResource(Path string) (map[string]interface{}, error) {
-	/*
-	   Get resource from kv store.
-
-	   :param path: path to get resource
-	   :return: resource if resource present in kv store else None
-	*/
-	//get resource from kv store
-
-	var Value []byte
-	Result := make(map[string]interface{})
-	var Str string
-
-	Resource, err := PONRMgr.KVStore.Get(Path)
-	if (err != nil) || (Resource == nil) {
-		log.Debugf("Resource  unavailable at %s", Path)
-		return nil, err
-	}
-
-	Value, err = ToByte(Resource.Value)
-
-	// decode resource fetched from backend store to dictionary
-	err = json.Unmarshal(Value, &Result)
-	if err != nil {
-		log.Error("Failed to decode resource")
-		return Result, err
-	}
-	/*
-	   resource pool in backend store stored as binary string whereas to
-	   access the pool to generate/release IDs it need to be converted
-	   as BitArray
-	*/
-	Str, err = ToString(Result[POOL])
-	if err != nil {
-		log.Error("Failed to conver to kv pair to string")
-		return Result, err
-	}
-	Decode64, _ := base64.StdEncoding.DecodeString(Str)
-	Result[POOL], err = ToByte(Decode64)
-	if err != nil {
-		log.Error("Failed to convert resource pool to byte")
-		return Result, err
-	}
-
-	return Result, err
-}
-
-func (PONRMgr *PONResourceManager) GetPath(IntfID uint32, ResourceType string) string {
-	/*
-	   Get path for given resource type.
-	   :param pon_intf_id: OLT PON interface id
-	   :param resource_type: String to identify type of resource
-	   :return: path for given resource type
-	*/
-
-	/*
-	   Get the shared pool for the given resource type.
-	   all the resource ranges and the shared resource maps are initialized during the init.
-	*/
-	SharedPoolID := PONRMgr.PonResourceRanges[PONRMgr.SharedIdxByType[ResourceType]].(uint32)
-	if SharedPoolID != 0 {
-		IntfID = SharedPoolID
-	}
-	var Path string
-	if ResourceType == ONU_ID {
-		Path = fmt.Sprintf(ONU_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
-	} else if ResourceType == ALLOC_ID {
-		Path = fmt.Sprintf(ALLOC_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
-	} else if ResourceType == GEMPORT_ID {
-		Path = fmt.Sprintf(GEMPORT_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
-	} else if ResourceType == FLOW_ID {
-		Path = fmt.Sprintf(FLOW_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
-	} else {
-		log.Error("Invalid resource pool identifier")
-	}
-	return Path
-}
-
-func (PONRMgr *PONResourceManager) GetResourceID(IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error) {
-	/*
-	   Create alloc/gemport/onu/flow id for given OLT PON interface.
-	   :param pon_intf_id: OLT PON interface id
-	   :param resource_type: String to identify type of resource
-	   :param num_of_id: required number of ids
-	   :return list/uint32/None: list, uint32 or None if resource type is
-	    alloc_id/gemport_id, onu_id or invalid type respectively
-	*/
-	if NumIDs < 1 {
-		log.Error("Invalid number of resources requested")
-		return nil, errors.New(fmt.Sprintf("Invalid number of resources requested %d", NumIDs))
-	}
-	// delegate to the master instance if sharing enabled across instances
-
-	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
-	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
-		return SharedResourceMgr.GetResourceID(IntfID, ResourceType, NumIDs)
-	}
-	log.Debugf("Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
-
-	Path := PONRMgr.GetPath(IntfID, ResourceType)
-	if Path == "" {
-		log.Errorf("Failed to get path for resource type %s", ResourceType)
-		return nil, errors.New(fmt.Sprintf("Failed to get path for resource type %s", ResourceType))
-	}
-	log.Debugf("Get resource for type %s on path %s", ResourceType, Path)
-	var Result []uint32
-	var NextID uint32
-	Resource, err := PONRMgr.GetResource(Path)
-	if (err == nil) && (ResourceType == ONU_ID) || (ResourceType == FLOW_ID) {
-		if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
-			log.Error("Failed to Generate ID")
-			return Result, err
-		}
-		Result = append(Result, NextID)
-	} else if (err == nil) && ((ResourceType == GEMPORT_ID) || (ResourceType == ALLOC_ID)) {
-		if NumIDs == 1 {
-			if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
-				log.Error("Failed to Generate ID")
-				return Result, err
-			}
-			Result = append(Result, NextID)
-		} else {
-			for NumIDs > 0 {
-				if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
-					log.Error("Failed to Generate ID")
-					return Result, err
-				}
-				Result = append(Result, NextID)
-				NumIDs--
-			}
-		}
-	} else {
-		log.Error("get resource failed")
-		return Result, err
-	}
-
-	//Update resource in kv store
-	if PONRMgr.UpdateResource(Path, Resource) != nil {
-		log.Errorf("Failed to update resource %s", Path)
-		return nil, errors.New(fmt.Sprintf("Failed to update resource %s", Path))
-	}
-	return Result, nil
-}
-
-func checkValidResourceType(ResourceType string) bool {
-	KnownResourceTypes := []string{ONU_ID, ALLOC_ID, GEMPORT_ID, FLOW_ID}
-
-	for _, v := range KnownResourceTypes {
-		if v == ResourceType {
-			return true
-		}
-	}
-	return false
-}
-
-func (PONRMgr *PONResourceManager) FreeResourceID(IntfID uint32, ResourceType string, ReleaseContent []uint32) bool {
-	/*
-	   Release alloc/gemport/onu/flow id for given OLT PON interface.
-	   :param pon_intf_id: OLT PON interface id
-	   :param resource_type: String to identify type of resource
-	   :param release_content: required number of ids
-	   :return boolean: True if all IDs in given release_content release else False
-	*/
-	if checkValidResourceType(ResourceType) == false {
-		log.Error("Invalid resource type")
-		return false
-	}
-	if ReleaseContent == nil {
-		log.Debug("Nothing to release")
-		return true
-	}
-	// delegate to the master instance if sharing enabled across instances
-	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
-	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
-		return SharedResourceMgr.FreeResourceID(IntfID, ResourceType, ReleaseContent)
-	}
-	Path := PONRMgr.GetPath(IntfID, ResourceType)
-	if Path == "" {
-		log.Error("Failed to get path")
-		return false
-	}
-	Resource, err := PONRMgr.GetResource(Path)
-	if err != nil {
-		log.Error("Failed to get resource")
-		return false
-	}
-	for _, Val := range ReleaseContent {
-		PONRMgr.ReleaseID(Resource, Val)
-	}
-	if PONRMgr.UpdateResource(Path, Resource) != nil {
-		log.Errorf("Free resource for %s failed", Path)
-		return false
-	}
-	return true
-}
-
-func (PONRMgr *PONResourceManager) UpdateResource(Path string, Resource map[string]interface{}) error {
-	/*
-	   Update resource in resource kv store.
-	   :param path: path to update resource
-	   :param resource: resource need to be updated
-	   :return boolean: True if resource updated in kv store else False
-	*/
-	// TODO resource[POOL] = resource[POOL].bin
-	Value, err := json.Marshal(Resource)
-	if err != nil {
-		log.Error("failed to Marshal")
-		return err
-	}
-	err = PONRMgr.KVStore.Put(Path, Value)
-	if err != nil {
-		log.Error("failed to put data to kv store %s", Path)
-		return err
-	}
-	return nil
-}
-
-func (PONRMgr *PONResourceManager) ClearResourceIDPool(IntfID uint32, ResourceType string) bool {
-	/*
-	   Clear Resource Pool for a given Resource Type on a given PON Port.
-	   :return boolean: True if removed else False
-	*/
-
-	// delegate to the master instance if sharing enabled across instances
-	SharedResourceMgr := PONRMgr.SharedResourceMgrs[PONRMgr.SharedIdxByType[ResourceType]]
-	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
-		return SharedResourceMgr.ClearResourceIDPool(IntfID, ResourceType)
-	}
-	Path := PONRMgr.GetPath(IntfID, ResourceType)
-	if Path == "" {
-		log.Error("Failed to get path")
-		return false
-	}
-
-	if err := PONRMgr.KVStore.Delete(Path); err != nil {
-		log.Errorf("Failed to delete resource %s", Path)
-		return false
-	}
-	log.Debugf("Cleared resource %s", Path)
-	return true
-}
-
-func (PONRMgr PONResourceManager) InitResourceMap(PONIntfONUID string) {
-	/*
-	   Initialize resource map
-	   :param pon_intf_onu_id: reference of PON interface id and onu id
-	*/
-	// initialize pon_intf_onu_id tuple to alloc_ids map
-	AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
-	var AllocIDs []byte
-	Result := PONRMgr.KVStore.Put(AllocIDPath, AllocIDs)
-	if Result != nil {
-		log.Error("Failed to update the KV store")
-		return
-	}
-	// initialize pon_intf_onu_id tuple to gemport_ids map
-	GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
-	var GEMPortIDs []byte
-	Result = PONRMgr.KVStore.Put(GEMPortIDPath, GEMPortIDs)
-	if Result != nil {
-		log.Error("Failed to update the KV store")
-		return
-	}
-}
-
-func (PONRMgr PONResourceManager) RemoveResourceMap(PONIntfONUID string) bool {
-	/*
-	   Remove resource map
-	   :param pon_intf_onu_id: reference of PON interface id and onu id
-	*/
-	// remove pon_intf_onu_id tuple to alloc_ids map
-	var err error
-	AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
-	if err = PONRMgr.KVStore.Delete(AllocIDPath); err != nil {
-		log.Errorf("Failed to remove resource %s", AllocIDPath)
-		return false
-	}
-	// remove pon_intf_onu_id tuple to gemport_ids map
-	GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
-	err = PONRMgr.KVStore.Delete(GEMPortIDPath)
-	if err != nil {
-		log.Errorf("Failed to remove resource %s", GEMPortIDPath)
-		return false
-	}
-
-	FlowIDPath := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
-	if FlowIDs, err := PONRMgr.KVStore.List(FlowIDPath); err != nil {
-		for _, Flow := range FlowIDs {
-			FlowIDInfoPath := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, PONIntfONUID, Flow.Value)
-			if err = PONRMgr.KVStore.Delete(FlowIDInfoPath); err != nil {
-				log.Errorf("Failed to remove resource %s", FlowIDInfoPath)
-				return false
-			}
-		}
-	}
-
-	if err = PONRMgr.KVStore.Delete(FlowIDPath); err != nil {
-		log.Errorf("Failed to remove resource %s", FlowIDPath)
-		return false
-	}
-
-	return true
-}
-
-func (PONRMgr *PONResourceManager) GetCurrentAllocIDForOnu(IntfONUID string) []uint32 {
-	/*
-	   Get currently configured alloc ids for given pon_intf_onu_id
-	   :param pon_intf_onu_id: reference of PON interface id and onu id
-	   :return list: List of alloc_ids if available, else None
-	*/
-	Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
-
-	var Data []uint32
-	Value, err := PONRMgr.KVStore.Get(Path)
-	if err == nil {
-		if Value != nil {
-			Val, err := ToByte(Value.Value)
-			if err != nil {
-				log.Errorw("Failed to convert into byte array", log.Fields{"error": err})
-				return Data
-			}
-			if err = json.Unmarshal(Val, &Data); err != nil {
-				log.Error("Failed to unmarshal", log.Fields{"error": err})
-				return Data
-			}
-		}
-	}
-	return Data
-}
-
-func (PONRMgr *PONResourceManager) GetCurrentGEMPortIDsForOnu(IntfONUID string) []uint32 {
-	/*
-	   Get currently configured gemport ids for given pon_intf_onu_id
-	   :param pon_intf_onu_id: reference of PON interface id and onu id
-	   :return list: List of gemport IDs if available, else None
-	*/
-
-	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
-	log.Debugf("Getting current gemports for %s", Path)
-	var Data []uint32
-	Value, err := PONRMgr.KVStore.Get(Path)
-	if err == nil {
-		if Value != nil {
-			Val, _ := ToByte(Value.Value)
-			if err = json.Unmarshal(Val, &Data); err != nil {
-				log.Errorw("Failed to unmarshal", log.Fields{"error": err})
-				return Data
-			}
-		}
-	} else {
-		log.Errorf("Failed to get data from kvstore for %s", Path)
-	}
-	return Data
-}
-
-func (PONRMgr *PONResourceManager) GetCurrentFlowIDsForOnu(IntfONUID string) []uint32 {
-	/*
-	   Get currently configured flow ids for given pon_intf_onu_id
-	   :param pon_intf_onu_id: reference of PON interface id and onu id
-	   :return list: List of Flow IDs if available, else None
-	*/
-
-	Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
-
-	var Data []uint32
-	Value, err := PONRMgr.KVStore.Get(Path)
-	if err == nil {
-		if Value != nil {
-			Val, _ := ToByte(Value.Value)
-			if err = json.Unmarshal(Val, &Data); err != nil {
-				log.Error("Failed to unmarshal")
-				return Data
-			}
-		}
-	}
-	return Data
-}
-
-func (PONRMgr *PONResourceManager) GetFlowIDInfo(IntfONUID string, FlowID uint32, Data interface{}) error {
-	/*
-	   Get flow details configured for the ONU.
-	   :param pon_intf_onu_id: reference of PON interface id and onu id
-	   :param flow_id: Flow Id reference
-	   :param Data: Result
-	   :return error: nil if no error in getting from KV store
-	*/
-
-	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
-
-	Value, err := PONRMgr.KVStore.Get(Path)
-	if err == nil {
-		if Value != nil {
-			Val, err := ToByte(Value.Value)
-			if err != nil {
-				log.Errorw("Failed to convert flowinfo into byte array", log.Fields{"error": err})
-				return err
-			}
-			if err = json.Unmarshal(Val, Data); err != nil {
-				log.Errorw("Failed to unmarshal", log.Fields{"error": err})
-				return err
-			}
-		}
-	}
-	return err
-}
-
-func (PONRMgr *PONResourceManager) RemoveFlowIDInfo(IntfONUID string, FlowID uint32) bool {
-	/*
-	   Get flow_id details configured for the ONU.
-	   :param pon_intf_onu_id: reference of PON interface id and onu id
-	   :param flow_id: Flow Id reference
-	*/
-	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
-
-	if err := PONRMgr.KVStore.Delete(Path); err != nil {
-		log.Errorf("Falied to remove resource %s", Path)
-		return false
-	}
-	return true
-}
-
-func (PONRMgr *PONResourceManager) UpdateAllocIdsForOnu(IntfONUID string, AllocIDs []uint32) error {
-	/*
-	   Update currently configured alloc ids for given pon_intf_onu_id
-	   :param pon_intf_onu_id: reference of PON interface id and onu id
-	   :param alloc_ids: list of alloc ids
-	*/
-	var Value []byte
-	var err error
-	Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
-	Value, err = json.Marshal(AllocIDs)
-	if err != nil {
-		log.Error("failed to Marshal")
-		return err
-	}
-
-	if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
-		log.Errorf("Failed to update resource %s", Path)
-		return err
-	}
-	return err
-}
-
-func (PONRMgr *PONResourceManager) UpdateGEMPortIDsForOnu(IntfONUID string, GEMPortIDs []uint32) error {
-	/*
-	   Update currently configured gemport ids for given pon_intf_onu_id
-	   :param pon_intf_onu_id: reference of PON interface id and onu id
-	   :param gemport_ids: list of gem port ids
-	*/
-
-	var Value []byte
-	var err error
-	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
-	log.Debugf("Updating gemport ids for %s", Path)
-	Value, err = json.Marshal(GEMPortIDs)
-	if err != nil {
-		log.Error("failed to Marshal")
-		return err
-	}
-
-	if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
-		log.Errorf("Failed to update resource %s", Path)
-		return err
-	}
-	return err
-}
-
-func checkForFlowIDInList(FlowIDList []uint32, FlowID uint32) (bool, uint32) {
-	/*
-	   Check for a flow id in a given list of flow IDs.
-	   :param FLowIDList: List of Flow IDs
-	   :param FlowID: Flowd to check in the list
-	   : return true and the index if present false otherwise.
-	*/
-
-	for idx, _ := range FlowIDList {
-		if FlowID == FlowIDList[idx] {
-			return true, uint32(idx)
-		}
-	}
-	return false, 0
-}
-
-func (PONRMgr *PONResourceManager) UpdateFlowIDForOnu(IntfONUID string, FlowID uint32, Add bool) error {
-	/*
-	   Update the flow_id list of the ONU (add or remove flow_id from the list)
-	   :param pon_intf_onu_id: reference of PON interface id and onu id
-	   :param flow_id: flow ID
-	   :param add: Boolean flag to indicate whether the flow_id should be
-	               added or removed from the list. Defaults to adding the flow.
-	*/
-	var Value []byte
-	var err error
-	var RetVal bool
-	var IDx uint32
-	Path := fmt.Sprintf(FLOW_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
-	FlowIDs := PONRMgr.GetCurrentFlowIDsForOnu(IntfONUID)
-
-	if Add {
-		if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); RetVal == true {
-			return err
-		}
-		FlowIDs = append(FlowIDs, FlowID)
-	} else {
-		if RetVal, IDx = checkForFlowIDInList(FlowIDs, FlowID); RetVal == false {
-			return err
-		}
-		// delete the index and shift
-		FlowIDs = append(FlowIDs[:IDx], FlowIDs[IDx+1:]...)
-	}
-	Value, err = json.Marshal(FlowIDs)
-	if err != nil {
-		log.Error("Failed to Marshal")
-		return err
-	}
-
-	if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
-		log.Errorf("Failed to update resource %s", Path)
-		return err
-	}
-	return err
-}
-
-func (PONRMgr *PONResourceManager) UpdateFlowIDInfoForOnu(IntfONUID string, FlowID uint32, FlowData interface{}) error {
-	/*
-	   Update any metadata associated with the flow_id. The flow_data could be json
-	   or any of other data structure. The resource manager doesnt care
-	   :param pon_intf_onu_id: reference of PON interface id and onu id
-	   :param flow_id: Flow ID
-	   :param flow_data: Flow data blob
-	*/
-	var Value []byte
-	var err error
-	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
-	Value, err = json.Marshal(FlowData)
-	if err != nil {
-		log.Error("failed to Marshal")
-		return err
-	}
-
-	if err = PONRMgr.KVStore.Put(Path, Value); err != nil {
-		log.Errorf("Failed to update resource %s", Path)
-		return err
-	}
-	return err
-}
-
-func (PONRMgr *PONResourceManager) GenerateNextID(Resource map[string]interface{}) (uint32, error) {
-	/*
-	   Generate unique id having OFFSET as start
-	   :param resource: resource used to generate ID
-	   :return uint32: generated id
-	*/
-	ByteArray, err := ToByte(Resource[POOL])
-	if err != nil {
-		log.Error("Failed to convert resource to byte array")
-		return 0, err
-	}
-	Data := bitmap.TSFromData(ByteArray, false)
-	if Data == nil {
-		log.Error("Failed to get data from byte array")
-		return 0, errors.New("Failed to get data from byte array")
-	}
-
-	Len := Data.Len()
-	var Idx int
-	for Idx = 0; Idx < Len; Idx++ {
-		Val := Data.Get(Idx)
-		if Val == false {
-			break
-		}
-	}
-	Data.Set(Idx, true)
-	res := uint32(Resource[START_IDX].(float64))
-	Resource[POOL] = Data.Data(false)
-	log.Debugf("Generated ID for %d", (uint32(Idx) + res))
-	return (uint32(Idx) + res), err
-}
-
-func (PONRMgr *PONResourceManager) ReleaseID(Resource map[string]interface{}, Id uint32) bool {
-	/*
-	   Release unique id having OFFSET as start index.
-	   :param resource: resource used to release ID
-	   :param unique_id: id need to be released
-	*/
-	ByteArray, err := ToByte(Resource[POOL])
-	if err != nil {
-		log.Error("Failed to convert resource to byte array")
-		return false
-	}
-	Data := bitmap.TSFromData(ByteArray, false)
-	if Data == nil {
-		log.Error("Failed to get resource pool")
-		return false
-	}
-	var Idx uint32
-	Idx = Id - uint32(Resource[START_IDX].(float64))
-	Data.Set(int(Idx), false)
-	Resource[POOL] = Data.Data(false)
-
-	return true
-}
-
-func (PONRMgr *PONResourceManager) GetTechnology() string {
-	return PONRMgr.Technology
-}
-
-func (PONRMgr *PONResourceManager) GetResourceTypeAllocID() string {
-	return ALLOC_ID
-}
-
-func (PONRMgr *PONResourceManager) GetResourceTypeGemPortID() string {
-	return GEMPORT_ID
-}
-
-// ToByte converts an interface value to a []byte.  The interface should either be of
-// a string type or []byte.  Otherwise, an error is returned.
-func ToByte(value interface{}) ([]byte, error) {
-	switch t := value.(type) {
-	case []byte:
-		return value.([]byte), nil
-	case string:
-		return []byte(value.(string)), nil
-	default:
-		return nil, fmt.Errorf("unexpected-type-%T", t)
-	}
-}
-
-// ToString converts an interface value to a string.  The interface should either be of
-// a string type or []byte.  Otherwise, an error is returned.
-func ToString(value interface{}) (string, error) {
-	switch t := value.(type) {
-	case []byte:
-		return string(value.([]byte)), nil
-	case string:
-		return value.(string), nil
-	default:
-		return "", fmt.Errorf("unexpected-type-%T", t)
-	}
-}
diff --git a/common/probe/probe_test.go b/common/probe/probe_test.go
deleted file mode 100644
index a7edc7f..0000000
--- a/common/probe/probe_test.go
+++ /dev/null
@@ -1,394 +0,0 @@
-/*
- * Copyright 2019-present Open Networking Foundation
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package probe
-
-import (
-	"context"
-	"encoding/json"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/stretchr/testify/assert"
-	"io/ioutil"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-)
-
-func init() {
-	log.AddPackage(log.JSON, log.WarnLevel, nil)
-}
-
-func TestServiceStatusString(t *testing.T) {
-	assert.Equal(t, "Unknown", ServiceStatusUnknown.String(), "ServiceStatusUnknown")
-	assert.Equal(t, "Preparing", ServiceStatusPreparing.String(), "ServiceStatusPreparing")
-	assert.Equal(t, "Prepared", ServiceStatusPrepared.String(), "ServiceStatusPrepared")
-	assert.Equal(t, "Running", ServiceStatusRunning.String(), "ServiceStatusRunning")
-	assert.Equal(t, "Stopped", ServiceStatusStopped.String(), "ServiceStatusStopped")
-	assert.Equal(t, "Failed", ServiceStatusFailed.String(), "ServiceStatusFailed")
-}
-
-func AlwaysTrue(map[string]ServiceStatus) bool {
-	return true
-}
-
-func AlwaysFalse(map[string]ServiceStatus) bool {
-	return false
-}
-
-func TestWithFuncs(t *testing.T) {
-	p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysFalse)
-
-	assert.NotNil(t, p.readyFunc, "ready func not set")
-	assert.True(t, p.readyFunc(nil), "ready func not set correctly")
-	assert.NotNil(t, p.healthFunc, "health func not set")
-	assert.False(t, p.healthFunc(nil), "health func not set correctly")
-}
-
-func TestWithReadyFuncOnly(t *testing.T) {
-	p := (&Probe{}).WithReadyFunc(AlwaysTrue)
-
-	assert.NotNil(t, p.readyFunc, "ready func not set")
-	assert.True(t, p.readyFunc(nil), "ready func not set correctly")
-	assert.Nil(t, p.healthFunc, "health func set")
-}
-
-func TestWithHealthFuncOnly(t *testing.T) {
-	p := (&Probe{}).WithHealthFunc(AlwaysTrue)
-
-	assert.Nil(t, p.readyFunc, "ready func set")
-	assert.NotNil(t, p.healthFunc, "health func not set")
-	assert.True(t, p.healthFunc(nil), "health func not set correctly")
-}
-
-func TestRegisterOneService(t *testing.T) {
-	p := &Probe{}
-
-	p.RegisterService("one")
-
-	assert.Equal(t, 1, len(p.status), "wrong number of services")
-
-	_, ok := p.status["one"]
-	assert.True(t, ok, "service not found")
-}
-
-func TestRegisterMultipleServices(t *testing.T) {
-	p := &Probe{}
-
-	p.RegisterService("one", "two", "three", "four")
-
-	assert.Equal(t, 4, len(p.status), "wrong number of services")
-
-	_, ok := p.status["one"]
-	assert.True(t, ok, "service one not found")
-	_, ok = p.status["two"]
-	assert.True(t, ok, "service two not found")
-	_, ok = p.status["three"]
-	assert.True(t, ok, "service three not found")
-	_, ok = p.status["four"]
-	assert.True(t, ok, "service four not found")
-}
-
-func TestRegisterMultipleServicesIncremental(t *testing.T) {
-	p := &Probe{}
-
-	p.RegisterService("one")
-	p.RegisterService("two")
-	p.RegisterService("three", "four")
-
-	assert.Equal(t, 4, len(p.status), "wrong number of services")
-
-	_, ok := p.status["one"]
-	assert.True(t, ok, "service one not found")
-	_, ok = p.status["two"]
-	assert.True(t, ok, "service two not found")
-	_, ok = p.status["three"]
-	assert.True(t, ok, "service three not found")
-	_, ok = p.status["four"]
-	assert.True(t, ok, "service four not found")
-}
-
-func TestRegisterMultipleServicesDuplicates(t *testing.T) {
-	p := &Probe{}
-
-	p.RegisterService("one", "one", "one", "two")
-
-	assert.Equal(t, 2, len(p.status), "wrong number of services")
-
-	_, ok := p.status["one"]
-	assert.True(t, ok, "service one not found")
-	_, ok = p.status["two"]
-	assert.True(t, ok, "service two not found")
-}
-
-func TestRegisterMultipleServicesDuplicatesIncremental(t *testing.T) {
-	p := &Probe{}
-
-	p.RegisterService("one")
-	p.RegisterService("one")
-	p.RegisterService("one", "two")
-
-	assert.Equal(t, 2, len(p.status), "wrong number of services")
-
-	_, ok := p.status["one"]
-	assert.True(t, ok, "service one not found")
-	_, ok = p.status["two"]
-	assert.True(t, ok, "service two not found")
-}
-
-func TestUpdateStatus(t *testing.T) {
-	p := &Probe{}
-
-	p.RegisterService("one", "two")
-	p.UpdateStatus("one", ServiceStatusRunning)
-
-	assert.Equal(t, ServiceStatusRunning, p.status["one"], "status not set")
-	assert.Equal(t, ServiceStatusUnknown, p.status["two"], "status set")
-}
-
-func TestRegisterOverwriteStatus(t *testing.T) {
-	p := &Probe{}
-
-	p.RegisterService("one", "two")
-	p.UpdateStatus("one", ServiceStatusRunning)
-
-	assert.Equal(t, ServiceStatusRunning, p.status["one"], "status not set")
-	assert.Equal(t, ServiceStatusUnknown, p.status["two"], "status set")
-
-	p.RegisterService("one", "three")
-	assert.Equal(t, 3, len(p.status), "wrong number of services")
-	assert.Equal(t, ServiceStatusRunning, p.status["one"], "status overridden")
-	assert.Equal(t, ServiceStatusUnknown, p.status["two"], "status set")
-	assert.Equal(t, ServiceStatusUnknown, p.status["three"], "status set")
-}
-
-func TestDetailzWithServies(t *testing.T) {
-	p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysTrue)
-	p.RegisterService("one", "two")
-
-	req := httptest.NewRequest("GET", "http://example.com/detailz", nil)
-	w := httptest.NewRecorder()
-	p.detailzFunc(w, req)
-	resp := w.Result()
-	body, _ := ioutil.ReadAll(resp.Body)
-
-	assert.Equal(t, http.StatusOK, resp.StatusCode, "invalid status code for no services")
-	assert.Equal(t, "application/json", resp.Header.Get("Content-Type"), "wrong content type")
-	var vals map[string]string
-	err := json.Unmarshal(body, &vals)
-	assert.Nil(t, err, "unable to unmarshal values")
-	assert.Equal(t, "Unknown", vals["one"], "wrong value")
-	assert.Equal(t, "Unknown", vals["two"], "wrong value")
-}
-
-func TestReadzNoServices(t *testing.T) {
-	p := (&Probe{}).WithReadyFunc(AlwaysTrue)
-	req := httptest.NewRequest("GET", "http://example.com/readz", nil)
-	w := httptest.NewRecorder()
-	p.readzFunc(w, req)
-	resp := w.Result()
-
-	assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code for no services")
-}
-
-func TestReadzWithServicesWithTrue(t *testing.T) {
-	p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysTrue)
-	p.RegisterService("one", "two")
-
-	req := httptest.NewRequest("GET", "http://example.com/readz", nil)
-	w := httptest.NewRecorder()
-	p.readzFunc(w, req)
-	resp := w.Result()
-	assert.Equal(t, http.StatusOK, resp.StatusCode, "invalid status code for registered only services")
-}
-
-func TestReadzWithServicesWithDefault(t *testing.T) {
-	p := &Probe{}
-	p.RegisterService("one", "two")
-
-	req := httptest.NewRequest("GET", "http://example.com/readz", nil)
-	w := httptest.NewRecorder()
-	p.readzFunc(w, req)
-	resp := w.Result()
-	assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code for registered only services")
-}
-
-func TestReadzNpServicesDefault(t *testing.T) {
-	p := &Probe{}
-
-	req := httptest.NewRequest("GET", "http://example.com/readz", nil)
-	w := httptest.NewRecorder()
-	p.readzFunc(w, req)
-	resp := w.Result()
-	assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code")
-}
-
-func TestReadzWithServicesDefault(t *testing.T) {
-	p := &Probe{}
-	p.RegisterService("one", "two")
-	p.UpdateStatus("one", ServiceStatusRunning)
-	p.UpdateStatus("two", ServiceStatusRunning)
-
-	req := httptest.NewRequest("GET", "http://example.com/readz", nil)
-	w := httptest.NewRecorder()
-	p.readzFunc(w, req)
-	resp := w.Result()
-	assert.Equal(t, http.StatusOK, resp.StatusCode, "invalid status code")
-}
-
-func TestReadzWithServicesDefaultOne(t *testing.T) {
-	p := &Probe{}
-	p.RegisterService("one", "two")
-	p.UpdateStatus("one", ServiceStatusRunning)
-
-	req := httptest.NewRequest("GET", "http://example.com/readz", nil)
-	w := httptest.NewRecorder()
-	p.readzFunc(w, req)
-	resp := w.Result()
-	assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code")
-}
-
-func TestHealthzNoServices(t *testing.T) {
-	p := (&Probe{}).WithReadyFunc(AlwaysTrue)
-	req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
-	w := httptest.NewRecorder()
-	p.healthzFunc(w, req)
-	resp := w.Result()
-
-	assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code for no services")
-}
-
-func TestHealthzWithServicesWithTrue(t *testing.T) {
-	p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysTrue)
-	p.RegisterService("one", "two")
-
-	req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
-	w := httptest.NewRecorder()
-	p.healthzFunc(w, req)
-	resp := w.Result()
-	assert.Equal(t, http.StatusOK, resp.StatusCode, "invalid status code for registered only services")
-}
-
-func TestHealthzWithServicesWithDefault(t *testing.T) {
-	p := &Probe{}
-	p.RegisterService("one", "two")
-
-	req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
-	w := httptest.NewRecorder()
-	p.healthzFunc(w, req)
-	resp := w.Result()
-	assert.Equal(t, http.StatusOK, resp.StatusCode, "invalid status code for registered only services")
-}
-
-func TestHealthzNoServicesDefault(t *testing.T) {
-	p := &Probe{}
-
-	req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
-	w := httptest.NewRecorder()
-	p.healthzFunc(w, req)
-	resp := w.Result()
-	assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code")
-}
-
-func TestHealthzWithServicesDefault(t *testing.T) {
-	p := &Probe{}
-	p.RegisterService("one", "two")
-	p.UpdateStatus("one", ServiceStatusRunning)
-	p.UpdateStatus("two", ServiceStatusRunning)
-
-	req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
-	w := httptest.NewRecorder()
-	p.healthzFunc(w, req)
-	resp := w.Result()
-	assert.Equal(t, http.StatusOK, resp.StatusCode, "invalid status code")
-}
-
-func TestHealthzWithServicesDefaultFailed(t *testing.T) {
-	p := &Probe{}
-	p.RegisterService("one", "two")
-	p.UpdateStatus("one", ServiceStatusFailed)
-
-	req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
-	w := httptest.NewRecorder()
-	p.healthzFunc(w, req)
-	resp := w.Result()
-	assert.Equal(t, http.StatusTeapot, resp.StatusCode, "invalid status code")
-}
-
-func TestSetFuncsToNil(t *testing.T) {
-	p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysFalse)
-	p.WithReadyFunc(nil).WithHealthFunc(nil)
-	assert.Nil(t, p.readyFunc, "ready func not reset to nil")
-	assert.Nil(t, p.healthFunc, "health func not reset to nil")
-}
-
-func TestUpdateStatusFromContext(t *testing.T) {
-	p := &Probe{}
-	p.RegisterService("one")
-	ctx := context.WithValue(context.Background(), ProbeContextKey, p)
-	UpdateStatusFromContext(ctx, "one", ServiceStatusRunning)
-
-	assert.Equal(t, 1, len(p.status), "wrong number of services")
-	_, ok := p.status["one"]
-	assert.True(t, ok, "unable to find registered service")
-	assert.Equal(t, ServiceStatusRunning, p.status["one"], "status not set correctly from context")
-
-}
-
-func TestUpdateStatusFromNilContext(t *testing.T) {
-	p := &Probe{}
-	p.RegisterService("one")
-	UpdateStatusFromContext(nil, "one", ServiceStatusRunning)
-
-	assert.Equal(t, 1, len(p.status), "wrong number of services")
-	_, ok := p.status["one"]
-	assert.True(t, ok, "unable to find registered service")
-	assert.Equal(t, ServiceStatusUnknown, p.status["one"], "status not set correctly from context")
-
-}
-
-func TestUpdateStatusFromContextWithoutProbe(t *testing.T) {
-	p := &Probe{}
-	p.RegisterService("one")
-	ctx := context.Background()
-	UpdateStatusFromContext(ctx, "one", ServiceStatusRunning)
-
-	assert.Equal(t, 1, len(p.status), "wrong number of services")
-	_, ok := p.status["one"]
-	assert.True(t, ok, "unable to find registered service")
-	assert.Equal(t, ServiceStatusUnknown, p.status["one"], "status not set correctly from context")
-
-}
-
-func TestUpdateStatusFromContextWrongType(t *testing.T) {
-	p := &Probe{}
-	p.RegisterService("one")
-	ctx := context.WithValue(context.Background(), ProbeContextKey, "Teapot")
-	UpdateStatusFromContext(ctx, "one", ServiceStatusRunning)
-
-	assert.Equal(t, 1, len(p.status), "wrong number of services")
-	_, ok := p.status["one"]
-	assert.True(t, ok, "unable to find registered service")
-	assert.Equal(t, ServiceStatusUnknown, p.status["one"], "status not set correctly from context")
-}
-
-func TestUpdateStatusNoRegistered(t *testing.T) {
-	p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysFalse)
-
-	p.UpdateStatus("one", ServiceStatusRunning)
-	assert.Equal(t, 1, len(p.status), "wrong number of services")
-	_, ok := p.status["one"]
-	assert.True(t, ok, "unable to find registered service")
-	assert.Equal(t, ServiceStatusRunning, p.status["one"], "status not set correctly")
-}
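The probe tests removed above exercise a small readiness/health API: RegisterService, UpdateStatus, WithReadyFunc/WithHealthFunc, and UpdateStatusFromContext. A minimal usage sketch, assuming the package is re-homed as github.com/opencord/voltha-lib-go/pkg/probe with the same exported API (the import path is an assumption, consistent with the pkg/log move elsewhere in this change):

package main

import (
	"context"

	"github.com/opencord/voltha-lib-go/pkg/probe" // assumed post-move import path
)

func main() {
	// Track two services on a single probe.
	p := &probe.Probe{}
	p.RegisterService("kafka", "kv-store")

	// Components report their own state transitions.
	p.UpdateStatus("kafka", probe.ServiceStatusRunning)

	// Deeper layers can report status through the context instead of holding a
	// Probe reference, as the deleted TestUpdateStatusFromContext demonstrates.
	ctx := context.WithValue(context.Background(), probe.ProbeContextKey, p)
	probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusRunning)
}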
diff --git a/common/techprofile/4QueueHybridProfileMap1.json b/common/techprofile/4QueueHybridProfileMap1.json
deleted file mode 100644
index d11f8e4..0000000
--- a/common/techprofile/4QueueHybridProfileMap1.json
+++ /dev/null
@@ -1,141 +0,0 @@
- {
-  "name": "4QueueHybridProfileMap1",
-  "profile_type": "XPON",
-  "version": 1,
-  "num_gem_ports": 4,
-  "instance_control": {
-    "onu": "multi-instance",
-    "uni": "single-instance",
-    "max_gem_payload_size": "auto"
-  },
-  "us_scheduler": {
-    "additional_bw": "AdditionalBW_Auto",
-    "direction": "UPSTREAM",
-    "priority": 0,
-    "weight": 0,
-    "q_sched_policy": "Hybrid"
-  },
-  "ds_scheduler": {
-    "additional_bw": "AdditionalBW_Auto",
-    "direction": "DOWNSTREAM",
-    "priority": 0,
-    "weight": 0,
-    "q_sched_policy": "Hybrid"
-  },
-  "upstream_gem_port_attribute_list": [
-    {
-      "pbit_map": "0b00000101",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 4,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "max_threshold": 0,
-        "min_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00011010",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 3,
-      "weight": 75,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00100000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 2,
-      "weight": 0,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b11000000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 1,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    }
-  ],
-  "downstream_gem_port_attribute_list": [
-    {
-      "pbit_map": "0b00000101",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 4,
-      "weight": 10,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00011010",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 3,
-      "weight": 90,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00100000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 2,
-      "weight": 0,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b11000000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 1,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    }
-  ]
-}
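The JSON template deleted above maps field-for-field onto the DefaultTechProfile structure removed from tech_profile.go later in this change. A short decoding sketch; the import path and the file location are illustrative assumptions:

package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"

	tp "github.com/opencord/voltha-lib-go/pkg/techprofile" // assumed post-move import path
)

func main() {
	// Read the template file and decode it into the DefaultTechProfile struct.
	raw, err := ioutil.ReadFile("4QueueHybridProfileMap1.json")
	if err != nil {
		panic(err)
	}
	var profile tp.DefaultTechProfile
	if err := json.Unmarshal(raw, &profile); err != nil {
		panic(err)
	}
	fmt.Println(profile.Name, profile.NumGemPorts)
}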
diff --git a/common/techprofile/README.md b/common/techprofile/README.md
deleted file mode 100644
index 03a396d..0000000
--- a/common/techprofile/README.md
+++ /dev/null
@@ -1,336 +0,0 @@
-Technology Profile Management
-Overview
-Technology profiles that are utilized by VOLTHA are stored in a prescribed structure in VOLTHA's key/value store, which is currently etcd. The key structure used to access technology profiles is /voltha/technology_profiles/<TECHNOLOGY>/<TID>, where TECHNOLOGY specifies the technology being used by the adapter, e.g. xgspon, and TID is the numeric ID of the technology profile. While the TECHNOLOGY key is a directory, the TID key should be set to the JSON data that represents the technology profile values.
-
-NOTE: The content of a technology profile represents a contract between the technology profile definition and all adapters that consume that technology profile. The structure and content of the profiles are outside the scope of Technology Profile Management. Technology profile management only specifies the key/value structure in which profiles are stored.
-
-Example JSON:
-
-{
-  "name": "4QueueHybridProfileMap1",
-  "profile_type": "XPON",
-  "version": 1,
-  "num_gem_ports": 4,
-  "instance_control": {
-    "onu": "multi-instance",
-    "uni": "single-instance",
-    "max_gem_payload_size": "auto"
-  },
-  "us_scheduler": {
-    "additional_bw": "auto",
-    "direction": "UPSTREAM",
-    "priority": 0,
-    "weight": 0,
-    "q_sched_policy": "hybrid"
-  },
-  "ds_scheduler": {
-    "additional_bw": "auto",
-    "direction": "DOWNSTREAM",
-    "priority": 0,
-    "weight": 0,
-    "q_sched_policy": "hybrid"
-  },
-  "upstream_gem_port_attribute_list": [
-    {
-      "pbit_map": "0b00000101",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 4,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "max_threshold": 0,
-        "min_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00011010",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 3,
-      "weight": 75,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00100000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 2,
-      "weight": 0,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b11000000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 1,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    }
-  ],
-  "downstream_gem_port_attribute_list": [
-    {
-      "pbit_map": "0b00000101",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 4,
-      "weight": 10,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00011010",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 3,
-      "weight": 90,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00100000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 2,
-      "weight": 0,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b11000000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 1,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    }
-  ]
-}
-
-Creating Technology Profiles
-A technology profile is a simple JSON object. This JSON object can be created using a variety of tools such as Vim, Emacs, or various IDEs. jq can be a useful tool for validating a JSON object. Once a file containing the JSON object has been created, it can be stored in VOLTHA's key/value store using the standard etcd command line tool etcdctl, or over HTTP using curl (the curl example below uses a PUT against the etcd v2 keys API).
-
-Assuming you are in a standard VOLTHA deployment within a Kubernetes cluster you can access the etcd key/value store using kubectl via the PODs named etcd-cluster-0000, etcd-cluster-0001, or etcd-cluster-0002. For the examples in this document etcd-cluster-0000 will be used, but it really shouldn't matter which is used.
-
-Etcd version 3 is used by the techprofile module; export this variable before running the etcdctl operations below: export ETCDCTL_API=3
-
-Assuming the technology profile template is stored in a local file 4QueueHybridProfileMap1.json, the following commands can be used to store or update the template at the proper location in the etcd key/value store:
-
-# Store a Technology template using etcdctl
-jq -c . 4QueueHybridProfileMap1.json | kubectl exec -i etcd-cluster-0000 -- etcdctl set service/voltha/technology_profiles/xgspon/64
-
-jq -c . 4QueueHybridProfileMap1.json |  etcdctl --endpoints=<ETCDIP>:2379 put service/voltha/technology_profiles/xgspon/64
-
-
-# Store a Technology template using curl
-curl -sSL -XPUT http://10.233.53.161:2379/v2/keys/service/voltha/technology_profiles/xgspon/64 -d value="$(jq -c . 4QueueHybridProfileMap1.json)"
-In the examples above, the command jq is used. This command can be installed using standard package management tools on most Linux systems. In the examples the "-c" option is used to compact the JSON. Using this tool is not necessary; if you choose not to use it, you can replace "jq -c ." in the above examples with the "cat" command. More on jq can be found at https://stedolan.github.io/jq/.
-
-Listing Technology Profiles for a given Technology
-While both curl and etcdctl (via kubectl) can be used to list or view the available technology profiles, etcdctl is easier and is therefore used in the examples. For listing technology profiles, etcdctl get is used; combined with the --prefix option it can list every profile stored under a key prefix.
-
-
-# List Tech profile
-etcdctl --endpoints=<EtcdIPAddres>:2379 get  service/voltha/technology_profiles/xgspon/64
-
-
-# Example output
-A specified Technology profile can be viewed with the etcdctl get command. (Again, jq is used for presentation purposes, and is not required)
-
-# Display a specified Technology profile, using jq to pretty print
-kubectl exec -i etcd-cluster-0000 -- etcdctl get service/voltha/technology_profiles/xgspon/64 | jq .
-
-etcdctl --endpoints=<ETCDIP>:2379 get  service/voltha/technology_profiles/xgspon/64
-# Example output
-service/voltha/technology_profiles/xgspon/64/uni-1
-{
-  "name": "4QueueHybridProfileMap1",
-  "profile_type": "XPON",
-  "version": 1,
-  "num_gem_ports": 4,
-  "instance_control": {
-    "onu": "multi-instance",
-    "uni": "single-instance",
-    "max_gem_payload_size": "auto"
-  },
-  "us_scheduler": {
-    "additional_bw": "auto",
-    "direction": "UPSTREAM",
-    "priority": 0,
-    "weight": 0,
-    "q_sched_policy": "hybrid"
-  },
-  "ds_scheduler": {
-    "additional_bw": "auto",
-    "direction": "DOWNSTREAM",
-    "priority": 0,
-    "weight": 0,
-    "q_sched_policy": "hybrid"
-  },
-  "upstream_gem_port_attribute_list": [
-    {
-      "pbit_map": "0b00000101",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 4,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "max_threshold": 0,
-        "min_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00011010",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 3,
-      "weight": 75,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00100000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 2,
-      "weight": 0,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b11000000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 1,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    }
-  ],
-  "downstream_gem_port_attribute_list": [
-    {
-      "pbit_map": "0b00000101",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 4,
-      "weight": 10,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00011010",
-      "aes_encryption": "True",
-      "scheduling_policy": "WRR",
-      "priority_q": 3,
-      "weight": 90,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b00100000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 2,
-      "weight": 0,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    },
-    {
-      "pbit_map": "0b11000000",
-      "aes_encryption": "True",
-      "scheduling_policy": "StrictPriority",
-      "priority_q": 1,
-      "weight": 25,
-      "discard_policy": "TailDrop",
-      "max_q_size": "auto",
-      "discard_config": {
-        "min_threshold": 0,
-        "max_threshold": 0,
-        "max_probability": 0
-      }
-    }
-  ]
-}
-
-Deleting Technology Profiles
-A technology profile or a technology profile tree can be removed using etcdctl rm.
-
-# Remove a specific technology profile
-kubectl exec -i etcd-cluster-0000 -- etcdctl rm service/voltha/technology_profiles/xgspon/64
-
-# Remove all technology profiles associated with technology xgspon and ID 64 (including the profile ID key)
-kubectl exec -i etcd-cluster-0000 -- etcdctl rm --dir -r service/voltha/technology_profiles/xgspon/64
diff --git a/common/techprofile/config.go b/common/techprofile/config.go
deleted file mode 100644
index 9d521ed..0000000
--- a/common/techprofile/config.go
+++ /dev/null
@@ -1,122 +0,0 @@
-/*
- * Copyright 2019-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package techprofile
-
-import (
-	"github.com/opencord/voltha-go/db/model"
-)
-
-// tech profile default constants
-const (
-	defaultTechProfileName        = "Default_1tcont_1gem_Profile"
-	DEFAULT_TECH_PROFILE_TABLE_ID = 64
-	defaultVersion                = 1.0
-	defaultLogLevel               = 0
-	defaultGemportsCount          = 1
-	defaultNumTconts              = 1
-	defaultPbits                  = "0b11111111"
-
-	defaultKVStoreType    = "etcd"
-	defaultKVStoreTimeout = 5 //in seconds
-	defaultKVStoreHost    = "127.0.0.1"
-	defaultKVStorePort    = 2379 // Consul = 8500; Etcd = 2379
-
-	// Tech profile path prefix in kv store
-	defaultKVPathPrefix = "service/voltha/technology_profiles"
-
-	// Tech profile path in kv store
-	defaultTechProfileKVPath = "%s/%d" // <technology>/<tech_profile_tableID>
-
-	// Tech profile instance path in kv store
-	// Format: <technology>/<tech_profile_tableID>/<uni_port_name>
-	defaultTPInstanceKVPath = "%s/%d/%s"
-)
-
-//Tech-Profile JSON String Keys
-// NOTE: The tech profile template JSON file should comply with the keys below
-const (
-	NAME                               = "name"
-	PROFILE_TYPE                       = "profile_type"
-	VERSION                            = "version"
-	NUM_GEM_PORTS                      = "num_gem_ports"
-	INSTANCE_CONTROL                   = "instance_control"
-	US_SCHEDULER                       = "us_scheduler"
-	DS_SCHEDULER                       = "ds_scheduler"
-	UPSTREAM_GEM_PORT_ATTRIBUTE_LIST   = "upstream_gem_port_attribute_list"
-	DOWNSTREAM_GEM_PORT_ATTRIBUTE_LIST = "downstream_gem_port_attribute_list"
-	ONU                                = "onu"
-	UNI                                = "uni"
-	MAX_GEM_PAYLOAD_SIZE               = "max_gem_payload_size"
-	DIRECTION                          = "direction"
-	ADDITIONAL_BW                      = "additional_bw"
-	PRIORITY                           = "priority"
-	Q_SCHED_POLICY                     = "q_sched_policy"
-	WEIGHT                             = "weight"
-	PBIT_MAP                           = "pbit_map"
-	DISCARD_CONFIG                     = "discard_config"
-	MAX_THRESHOLD                      = "max_threshold"
-	MIN_THRESHOLD                      = "min_threshold"
-	MAX_PROBABILITY                    = "max_probability"
-	DISCARD_POLICY                     = "discard_policy"
-	PRIORITY_Q                         = "priority_q"
-	SCHEDULING_POLICY                  = "scheduling_policy"
-	MAX_Q_SIZE                         = "max_q_size"
-	AES_ENCRYPTION                     = "aes_encryption"
-)
-
-// TechProfileFlags represents the set of configurations used by the tech profile module
-type TechProfileFlags struct {
-	KVStoreHost          string
-	KVStorePort          int
-	KVStoreType          string
-	KVStoreTimeout       int
-	KVBackend            *model.Backend
-	TPKVPathPrefix       string
-	TPFileKVPath         string
-	TPInstanceKVPath     string
-	DefaultTPName        string
-	TPVersion            int
-	NumGemPorts          uint32
-	NumTconts            uint32
-	DefaultPbits         []string
-	LogLevel             int
-	DefaultTechProfileID uint32
-	DefaultNumGemPorts   uint32
-	DefaultNumTconts     uint32
-}
-
-func NewTechProfileFlags(KVStoreType string, KVStoreHost string, KVStorePort int) *TechProfileFlags {
-	// initialize with default values
-	var techProfileFlags = TechProfileFlags{
-		KVBackend:            nil,
-		KVStoreHost:          KVStoreHost,
-		KVStorePort:          KVStorePort,
-		KVStoreType:          KVStoreType,
-		KVStoreTimeout:       defaultKVStoreTimeout,
-		DefaultTPName:        defaultTechProfileName,
-		TPKVPathPrefix:       defaultKVPathPrefix,
-		TPVersion:            defaultVersion,
-		TPFileKVPath:         defaultTechProfileKVPath,
-		TPInstanceKVPath:     defaultTPInstanceKVPath,
-		DefaultTechProfileID: DEFAULT_TECH_PROFILE_TABLE_ID,
-		DefaultNumGemPorts:   defaultGemportsCount,
-		DefaultNumTconts:     defaultNumTconts,
-		DefaultPbits:         []string{defaultPbits},
-		LogLevel:             defaultLogLevel,
-	}
-
-	return &techProfileFlags
-}
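NewTechProfileFlags above seeds every tuning knob with the defaults defined in this file (table ID 64, one T-CONT, one GEM port, the service/voltha/technology_profiles prefix). A usage sketch, assuming the package's post-move import path under voltha-lib-go:

package main

import (
	"fmt"

	tp "github.com/opencord/voltha-lib-go/pkg/techprofile" // assumed post-move import path
)

func main() {
	// Build the default flag set against a local etcd; only the store coordinates are supplied.
	flags := tp.NewTechProfileFlags("etcd", "127.0.0.1", 2379)
	fmt.Println(flags.DefaultTechProfileID, flags.TPKVPathPrefix, flags.DefaultNumGemPorts)
}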
diff --git a/common/techprofile/tech_profile.go b/common/techprofile/tech_profile.go
deleted file mode 100644
index 9c34880..0000000
--- a/common/techprofile/tech_profile.go
+++ /dev/null
@@ -1,692 +0,0 @@
-/*
- * Copyright 2019-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package techprofile
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"strconv"
-
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/kvstore"
-	"github.com/opencord/voltha-go/db/model"
-	tp_pb "github.com/opencord/voltha-protos/go/tech_profile"
-)
-
-// Interface to pon resource manager APIs
-type iPonResourceMgr interface {
-	GetResourceID(IntfID uint32, ResourceType string, NumIDs uint32) ([]uint32, error)
-	GetResourceTypeAllocID() string
-	GetResourceTypeGemPortID() string
-	GetTechnology() string
-}
-
-type Direction int32
-
-const (
-	Direction_UPSTREAM      Direction = 0
-	Direction_DOWNSTREAM    Direction = 1
-	Direction_BIDIRECTIONAL Direction = 2
-)
-
-var Direction_name = map[Direction]string{
-	0: "UPSTREAM",
-	1: "DOWNSTREAM",
-	2: "BIDIRECTIONAL",
-}
-
-type SchedulingPolicy int32
-
-const (
-	SchedulingPolicy_WRR            SchedulingPolicy = 0
-	SchedulingPolicy_StrictPriority SchedulingPolicy = 1
-	SchedulingPolicy_Hybrid         SchedulingPolicy = 2
-)
-
-var SchedulingPolicy_name = map[SchedulingPolicy]string{
-	0: "WRR",
-	1: "StrictPriority",
-	2: "Hybrid",
-}
-
-type AdditionalBW int32
-
-const (
-	AdditionalBW_AdditionalBW_None       AdditionalBW = 0
-	AdditionalBW_AdditionalBW_NA         AdditionalBW = 1
-	AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
-	AdditionalBW_AdditionalBW_Auto       AdditionalBW = 3
-)
-
-var AdditionalBW_name = map[AdditionalBW]string{
-	0: "AdditionalBW_None",
-	1: "AdditionalBW_NA",
-	2: "AdditionalBW_BestEffort",
-	3: "AdditionalBW_Auto",
-}
-
-type DiscardPolicy int32
-
-const (
-	DiscardPolicy_TailDrop  DiscardPolicy = 0
-	DiscardPolicy_WTailDrop DiscardPolicy = 1
-	DiscardPolicy_Red       DiscardPolicy = 2
-	DiscardPolicy_WRed      DiscardPolicy = 3
-)
-
-var DiscardPolicy_name = map[DiscardPolicy]string{
-	0: "TailDrop",
-	1: "WTailDrop",
-	2: "Red",
-	3: "WRed",
-}
-
-/*
-type InferredAdditionBWIndication int32
-
-const (
-	InferredAdditionBWIndication_InferredAdditionBWIndication_None       InferredAdditionBWIndication = 0
-	InferredAdditionBWIndication_InferredAdditionBWIndication_Assured    InferredAdditionBWIndication = 1
-	InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort InferredAdditionBWIndication = 2
-)
-
-var InferredAdditionBWIndication_name = map[int32]string{
-	0: "InferredAdditionBWIndication_None",
-	1: "InferredAdditionBWIndication_Assured",
-	2: "InferredAdditionBWIndication_BestEffort",
-}
-*/
-// instance control defaults
-const (
-	defaultOnuInstance    = "multi-instance"
-	defaultUniInstance    = "single-instance"
-	defaultNumGemPorts    = 1
-	defaultGemPayloadSize = "auto"
-)
-
-const MAX_GEM_PAYLOAD = "max_gem_payload_size"
-
-type InstanceControl struct {
-	Onu               string `json:"ONU"`
-	Uni               string `json:"uni"`
-	MaxGemPayloadSize string `json:"max_gem_payload_size"`
-}
-
-// default discard config constants
-const (
-	defaultMinThreshold   = 0
-	defaultMaxThreshold   = 0
-	defaultMaxProbability = 0
-)
-
-type DiscardConfig struct {
-	MinThreshold   int `json:"min_threshold"`
-	MaxThreshold   int `json:"max_threshold"`
-	MaxProbability int `json:"max_probability"`
-}
-
-// default scheduler constants
-const (
-	defaultAdditionalBw     = AdditionalBW_AdditionalBW_BestEffort
-	defaultPriority         = 0
-	defaultWeight           = 0
-	defaultQueueSchedPolicy = SchedulingPolicy_Hybrid
-)
-
-type Scheduler struct {
-	Direction    string `json:"direction"`
-	AdditionalBw string `json:"additional_bw"`
-	Priority     uint32 `json:"priority"`
-	Weight       uint32 `json:"weight"`
-	QSchedPolicy string `json:"q_sched_policy"`
-}
-
-// default GEM attribute constants
-const (
-	defaultAESEncryption  = "True"
-	defaultPriorityQueue  = 0
-	defaultQueueWeight    = 0
-	defaultMaxQueueSize   = "auto"
-	defaultdropPolicy     = DiscardPolicy_TailDrop
-	defaultSchedulePolicy = SchedulingPolicy_WRR
-)
-
-type GemPortAttribute struct {
-	MaxQueueSize     string        `json:"max_q_size"`
-	PbitMap          string        `json:"pbit_map"`
-	AesEncryption    string        `json:"aes_encryption"`
-	SchedulingPolicy string        `json:"scheduling_policy"`
-	PriorityQueue    uint32        `json:"priority_q"`
-	Weight           uint32        `json:"weight"`
-	DiscardPolicy    string        `json:"discard_policy"`
-	DiscardConfig    DiscardConfig `json:"discard_config"`
-}
-
-type iScheduler struct {
-	AllocID      uint32 `json:"alloc_id"`
-	Direction    string `json:"direction"`
-	AdditionalBw string `json:"additional_bw"`
-	Priority     uint32 `json:"priority"`
-	Weight       uint32 `json:"weight"`
-	QSchedPolicy string `json:"q_sched_policy"`
-}
-type iGemPortAttribute struct {
-	GemportID        uint32        `json:"gemport_id"`
-	MaxQueueSize     string        `json:"max_q_size"`
-	PbitMap          string        `json:"pbit_map"`
-	AesEncryption    string        `json:"aes_encryption"`
-	SchedulingPolicy string        `json:"scheduling_policy"`
-	PriorityQueue    uint32        `json:"priority_q"`
-	Weight           uint32        `json:"weight"`
-	DiscardPolicy    string        `json:"discard_policy"`
-	DiscardConfig    DiscardConfig `json:"discard_config"`
-}
-
-type TechProfileMgr struct {
-	config      *TechProfileFlags
-	resourceMgr iPonResourceMgr
-}
-type DefaultTechProfile struct {
-	Name                           string             `json:"name"`
-	ProfileType                    string             `json:"profile_type"`
-	Version                        int                `json:"version"`
-	NumGemPorts                    uint32             `json:"num_gem_ports"`
-	InstanceCtrl                   InstanceControl    `json:"instance_control"`
-	UsScheduler                    Scheduler          `json:"us_scheduler"`
-	DsScheduler                    Scheduler          `json:"ds_scheduler"`
-	UpstreamGemPortAttributeList   []GemPortAttribute `json:"upstream_gem_port_attribute_list"`
-	DownstreamGemPortAttributeList []GemPortAttribute `json:"downstream_gem_port_attribute_list"`
-}
-type TechProfile struct {
-	Name                           string              `json:"name"`
-	SubscriberIdentifier           string              `json:"subscriber_identifier"`
-	ProfileType                    string              `json:"profile_type"`
-	Version                        int                 `json:"version"`
-	NumGemPorts                    uint32              `json:"num_gem_ports"`
-	NumTconts                      uint32              `json:"num_of_tconts"`
-	InstanceCtrl                   InstanceControl     `json:"instance_control"`
-	UsScheduler                    iScheduler          `json:"us_scheduler"`
-	DsScheduler                    iScheduler          `json:"ds_scheduler"`
-	UpstreamGemPortAttributeList   []iGemPortAttribute `json:"upstream_gem_port_attribute_list"`
-	DownstreamGemPortAttributeList []iGemPortAttribute `json:"downstream_gem_port_attribute_list"`
-}
-
-func (t *TechProfileMgr) SetKVClient() *model.Backend {
-	addr := t.config.KVStoreHost + ":" + strconv.Itoa(t.config.KVStorePort)
-	kvClient, err := newKVClient(t.config.KVStoreType, addr, t.config.KVStoreTimeout)
-	if err != nil {
-		log.Errorw("failed-to-create-kv-client",
-			log.Fields{
-				"type": t.config.KVStoreType, "host": t.config.KVStoreHost, "port": t.config.KVStorePort,
-				"timeout": t.config.KVStoreTimeout, "prefix": t.config.TPKVPathPrefix,
-				"error": err.Error(),
-			})
-		return nil
-	}
-	return &model.Backend{
-		Client:     kvClient,
-		StoreType:  t.config.KVStoreType,
-		Host:       t.config.KVStoreHost,
-		Port:       t.config.KVStorePort,
-		Timeout:    t.config.KVStoreTimeout,
-		PathPrefix: t.config.TPKVPathPrefix}
-
-	/* TODO: Make sure a direct call to NewBackend works with the backend; currently there is an
-	   issue between the kv store and the backend, and the core is not calling NewBackend directly:
-		   kv := model.NewBackend(t.config.KVStoreType, t.config.KVStoreHost, t.config.KVStorePort,
-										t.config.KVStoreTimeout,  kvStoreTechProfilePathPrefix)
-	*/
-}
-
-func newKVClient(storeType string, address string, timeout int) (kvstore.Client, error) {
-
-	log.Infow("kv-store", log.Fields{"storeType": storeType, "address": address})
-	switch storeType {
-	case "consul":
-		return kvstore.NewConsulClient(address, timeout)
-	case "etcd":
-		return kvstore.NewEtcdClient(address, timeout)
-	}
-	return nil, errors.New("unsupported-kv-store")
-}
-
-func NewTechProfile(resourceMgr iPonResourceMgr, KVStoreType string, KVStoreHost string, KVStorePort int) (*TechProfileMgr, error) {
-	var techprofileObj TechProfileMgr
-	log.Debug("Initializing techprofile Manager")
-	techprofileObj.config = NewTechProfileFlags(KVStoreType, KVStoreHost, KVStorePort)
-	techprofileObj.config.KVBackend = techprofileObj.SetKVClient()
-	if techprofileObj.config.KVBackend == nil {
-		log.Error("Failed to initialize KV backend\n")
-		return nil, errors.New("KV backend init failed")
-	}
-	techprofileObj.resourceMgr = resourceMgr
-	log.Debug("Initializing techprofile object instance success")
-	return &techprofileObj, nil
-}
-
-func (t *TechProfileMgr) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string {
-	return fmt.Sprintf(t.config.TPInstanceKVPath, t.resourceMgr.GetTechnology(), techProfiletblID, uniPortName)
-}
-
-func (t *TechProfileMgr) GetTPInstanceFromKVStore(techProfiletblID uint32, path string) (*TechProfile, error) {
-	var KvTpIns TechProfile
-	var resPtr *TechProfile = &KvTpIns
-	var err error
-	/*path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)*/
-	log.Infow("Getting tech profile instance from KV store", log.Fields{"path": path})
-	kvresult, err := t.config.KVBackend.Get(path)
-	if err != nil {
-		log.Errorw("Error while fetching tech-profile instance  from KV backend", log.Fields{"key": path})
-		return nil, err
-	}
-	if kvresult == nil {
-		log.Infow("Tech profile does not exist in KV store", log.Fields{"key": path})
-		resPtr = nil
-	} else {
-		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
-			if err = json.Unmarshal(value, resPtr); err != nil {
-				log.Errorw("Error while unmarshal KV result", log.Fields{"key": path, "value": value})
-			}
-		}
-	}
-	return resPtr, err
-}
-
-func (t *TechProfileMgr) addTechProfInstanceToKVStore(techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
-	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
-	log.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
-	tpInstanceJson, err := json.Marshal(*tpInstance)
-	if err == nil {
-		// Backend will convert JSON byte array into string format
-		log.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
-		err = t.config.KVBackend.Put(path, tpInstanceJson)
-	} else {
-		log.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
-	}
-	return err
-}
-func (t *TechProfileMgr) getTPFromKVStore(techProfiletblID uint32) *DefaultTechProfile {
-	var kvtechprofile DefaultTechProfile
-	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
-	log.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
-	kvresult, err := t.config.KVBackend.Get(key)
-	if err != nil {
-		log.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
-		return nil
-	}
-	if kvresult != nil {
-		/* Backend will return Value in string format,needs to be converted to []byte before unmarshal*/
-		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
-			if err = json.Unmarshal(value, &kvtechprofile); err == nil {
-				log.Debugw("Successfully fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
-				return &kvtechprofile
-			}
-		}
-	}
-	return nil
-}
-func (t *TechProfileMgr) CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfId uint32) *TechProfile {
-	var tpInstance *TechProfile
-	log.Infow("Creating tech profile instance ", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
-	tp := t.getTPFromKVStore(techProfiletblID)
-	if tp != nil {
-		log.Infow("Creating tech profile instance with profile from KV store", log.Fields{"tpid": techProfiletblID})
-	} else {
-		tp = t.getDefaultTechProfile()
-		log.Infow("Creating tech profile instance with default values", log.Fields{"tpid": techProfiletblID})
-	}
-	tpInstance = t.allocateTPInstance(uniPortName, tp, intfId, t.config.DefaultNumTconts)
-	if err := t.addTechProfInstanceToKVStore(techProfiletblID, uniPortName, tpInstance); err != nil {
-		log.Errorw("Error in adding tech profile instance to KV ", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
-		return nil
-	}
-	log.Infow("Added tech profile instance to KV store successfully ",
-		log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
-	return tpInstance
-}
-
-func (t *TechProfileMgr) DeleteTechProfileInstance(techProfiletblID uint32, uniPortName string) error {
-	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
-	return t.config.KVBackend.Delete(path)
-}
-
-func (t *TechProfileMgr) allocateTPInstance(uniPortName string, tp *DefaultTechProfile, intfId uint32, numOfTconts uint32) *TechProfile {
-
-	var usGemPortAttributeList []iGemPortAttribute
-	var dsGemPortAttributeList []iGemPortAttribute
-	var tcontIDs []uint32
-	var gemPorts []uint32
-	var err error
-
-	log.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numOfTconts": numOfTconts, "numGem": tp.NumGemPorts})
-	if numOfTconts > 1 {
-		log.Errorw("Multiple Tconts not supported currently", log.Fields{"uniPortName": uniPortName, "intfId": intfId})
-		return nil
-	}
-	if tcontIDs, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeAllocID(), numOfTconts); err != nil {
-		log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId, "numTconts": numOfTconts})
-		return nil
-	}
-	log.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
-	if gemPorts, err = t.resourceMgr.GetResourceID(intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
-		log.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
-		return nil
-	}
-	log.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
-	for index := 0; index < int(tp.NumGemPorts); index++ {
-		usGemPortAttributeList = append(usGemPortAttributeList,
-			iGemPortAttribute{GemportID: gemPorts[index],
-				MaxQueueSize:     tp.UpstreamGemPortAttributeList[index].MaxQueueSize,
-				PbitMap:          tp.UpstreamGemPortAttributeList[index].PbitMap,
-				AesEncryption:    tp.UpstreamGemPortAttributeList[index].AesEncryption,
-				SchedulingPolicy: tp.UpstreamGemPortAttributeList[index].SchedulingPolicy,
-				PriorityQueue:    tp.UpstreamGemPortAttributeList[index].PriorityQueue,
-				Weight:           tp.UpstreamGemPortAttributeList[index].Weight,
-				DiscardPolicy:    tp.UpstreamGemPortAttributeList[index].DiscardPolicy,
-				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
-		dsGemPortAttributeList = append(dsGemPortAttributeList,
-			iGemPortAttribute{GemportID: gemPorts[index],
-				MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
-				PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
-				AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
-				SchedulingPolicy: tp.DownstreamGemPortAttributeList[index].SchedulingPolicy,
-				PriorityQueue:    tp.DownstreamGemPortAttributeList[index].PriorityQueue,
-				Weight:           tp.DownstreamGemPortAttributeList[index].Weight,
-				DiscardPolicy:    tp.DownstreamGemPortAttributeList[index].DiscardPolicy,
-				DiscardConfig:    tp.DownstreamGemPortAttributeList[index].DiscardConfig})
-	}
-	return &TechProfile{
-		SubscriberIdentifier: uniPortName,
-		Name:                 tp.Name,
-		ProfileType:          tp.ProfileType,
-		Version:              tp.Version,
-		NumGemPorts:          tp.NumGemPorts,
-		NumTconts:            numOfTconts,
-		InstanceCtrl:         tp.InstanceCtrl,
-		UsScheduler: iScheduler{
-			AllocID:      tcontIDs[0],
-			Direction:    tp.UsScheduler.Direction,
-			AdditionalBw: tp.UsScheduler.AdditionalBw,
-			Priority:     tp.UsScheduler.Priority,
-			Weight:       tp.UsScheduler.Weight,
-			QSchedPolicy: tp.UsScheduler.QSchedPolicy},
-		DsScheduler: iScheduler{
-			AllocID:      tcontIDs[0],
-			Direction:    tp.DsScheduler.Direction,
-			AdditionalBw: tp.DsScheduler.AdditionalBw,
-			Priority:     tp.DsScheduler.Priority,
-			Weight:       tp.DsScheduler.Weight,
-			QSchedPolicy: tp.DsScheduler.QSchedPolicy},
-		UpstreamGemPortAttributeList:   usGemPortAttributeList,
-		DownstreamGemPortAttributeList: dsGemPortAttributeList}
-}
-
-func (t *TechProfileMgr) getDefaultTechProfile() *DefaultTechProfile {
-
-	var usGemPortAttributeList []GemPortAttribute
-	var dsGemPortAttributeList []GemPortAttribute
-
-	for _, pbit := range t.config.DefaultPbits {
-		log.Debugw("Creating GEM port", log.Fields{"pbit": pbit})
-		usGemPortAttributeList = append(usGemPortAttributeList,
-			GemPortAttribute{
-				MaxQueueSize:     defaultMaxQueueSize,
-				PbitMap:          pbit,
-				AesEncryption:    defaultAESEncryption,
-				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
-				PriorityQueue:    defaultPriorityQueue,
-				Weight:           defaultQueueWeight,
-				DiscardPolicy:    DiscardPolicy_name[defaultdropPolicy],
-				DiscardConfig: DiscardConfig{
-					MinThreshold:   defaultMinThreshold,
-					MaxThreshold:   defaultMaxThreshold,
-					MaxProbability: defaultMaxProbability}})
-		dsGemPortAttributeList = append(dsGemPortAttributeList,
-			GemPortAttribute{
-				MaxQueueSize:     defaultMaxQueueSize,
-				PbitMap:          pbit,
-				AesEncryption:    defaultAESEncryption,
-				SchedulingPolicy: SchedulingPolicy_name[defaultSchedulePolicy],
-				PriorityQueue:    defaultPriorityQueue,
-				Weight:           defaultQueueWeight,
-				DiscardPolicy:    DiscardPolicy_name[defaultdropPolicy],
-				DiscardConfig: DiscardConfig{
-					MinThreshold:   defaultMinThreshold,
-					MaxThreshold:   defaultMaxThreshold,
-					MaxProbability: defaultMaxProbability}})
-	}
-	return &DefaultTechProfile{
-		Name:        t.config.DefaultTPName,
-		ProfileType: t.resourceMgr.GetTechnology(),
-		Version:     t.config.TPVersion,
-		NumGemPorts: uint32(len(usGemPortAttributeList)),
-		InstanceCtrl: InstanceControl{
-			Onu:               defaultOnuInstance,
-			Uni:               defaultUniInstance,
-			MaxGemPayloadSize: defaultGemPayloadSize},
-		UsScheduler: Scheduler{
-			Direction:    Direction_name[Direction_UPSTREAM],
-			AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
-			Priority:     defaultPriority,
-			Weight:       defaultWeight,
-			QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
-		DsScheduler: Scheduler{
-			Direction:    Direction_name[Direction_DOWNSTREAM],
-			AdditionalBw: AdditionalBW_name[defaultAdditionalBw],
-			Priority:     defaultPriority,
-			Weight:       defaultWeight,
-			QSchedPolicy: SchedulingPolicy_name[defaultQueueSchedPolicy]},
-		UpstreamGemPortAttributeList:   usGemPortAttributeList,
-		DownstreamGemPortAttributeList: dsGemPortAttributeList}
-}
-
-func (t *TechProfileMgr) GetprotoBufParamValue(paramType string, paramKey string) int32 {
-	var result int32 = -1
-
-	if paramType == "direction" {
-		for key, val := range tp_pb.Direction_value {
-			if key == paramKey {
-				result = val
-			}
-		}
-	} else if paramType == "discard_policy" {
-		for key, val := range tp_pb.DiscardPolicy_value {
-			if key == paramKey {
-				result = val
-			}
-		}
-	} else if paramType == "sched_policy" {
-		for key, val := range tp_pb.SchedulingPolicy_value {
-			if key == paramKey {
-				log.Debugw("Got value in proto", log.Fields{"key": key, "value": val})
-				result = val
-			}
-		}
-	} else if paramType == "additional_bw" {
-		for key, val := range tp_pb.AdditionalBW_value {
-			if key == paramKey {
-				result = val
-			}
-		}
-	} else {
-		log.Error("Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
-		return -1
-	}
-	log.Debugw("Got value in proto", log.Fields{"key": paramKey, "value": result})
-	return result
-}
-
-func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) *tp_pb.SchedulerConfig {
-	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction))
-	if dir == -1 {
-		log.Fatal("Error in getting Proto for direction for upstream scheduler")
-		return nil
-	}
-	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw))
-	if bw == -1 {
-		log.Fatal("Error in getting Proto for bandwidth for upstream scheduler")
-		return nil
-	}
-	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy))
-	if policy == -1 {
-		log.Fatal("Error in getting Proto for scheduling policy for upstream scheduler")
-		return nil
-	}
-	return &tp_pb.SchedulerConfig{
-		Direction:    dir,
-		AdditionalBw: bw,
-		Priority:     tpInstance.UsScheduler.Priority,
-		Weight:       tpInstance.UsScheduler.Weight,
-		SchedPolicy:  policy}
-}
-
-func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) *tp_pb.SchedulerConfig {
-
-	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction))
-	if dir == -1 {
-		log.Fatal("Error in getting Proto for direction for downstream scheduler")
-		return nil
-	}
-	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw))
-	if bw == -1 {
-		log.Fatal("Error in getting Proto for bandwidth for downstream scheduler")
-		return nil
-	}
-	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy))
-	if policy == -1 {
-		log.Fatal("Error in getting Proto for scheduling policy for downstream scheduler")
-		return nil
-	}
-
-	return &tp_pb.SchedulerConfig{
-		Direction:    dir,
-		AdditionalBw: bw,
-		Priority:     tpInstance.DsScheduler.Priority,
-		Weight:       tpInstance.DsScheduler.Weight,
-		SchedPolicy:  policy}
-}
-
-func (t *TechProfileMgr) GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
-	ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler {
-
-	tSched := &tp_pb.TrafficScheduler{
-		Direction:          SchedCfg.Direction,
-		AllocId:            tpInstance.UsScheduler.AllocID,
-		TrafficShapingInfo: ShapingCfg,
-		Scheduler:          SchedCfg}
-
-	return tSched
-}
-
-func (tpm *TechProfileMgr) GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) []*tp_pb.TrafficQueue {
-
-	var encryp bool
-	if Dir == tp_pb.Direction_UPSTREAM {
-		// upstream GEM ports
-		NumGemPorts := len(tp.UpstreamGemPortAttributeList)
-		GemPorts := make([]*tp_pb.TrafficQueue, 0)
-		for Count := 0; Count < NumGemPorts; Count++ {
-			if tp.UpstreamGemPortAttributeList[Count].AesEncryption == "True" {
-				encryp = true
-			} else {
-				encryp = false
-			}
-			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
-				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.UsScheduler.Direction)),
-				GemportId:     tp.UpstreamGemPortAttributeList[Count].GemportID,
-				PbitMap:       tp.UpstreamGemPortAttributeList[Count].PbitMap,
-				AesEncryption: encryp,
-				SchedPolicy:   tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue("sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)),
-				Priority:      tp.UpstreamGemPortAttributeList[Count].PriorityQueue,
-				Weight:        tp.UpstreamGemPortAttributeList[Count].Weight,
-				DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)),
-			})
-		}
-		log.Debugw("Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
-		return GemPorts
-	} else if Dir == tp_pb.Direction_DOWNSTREAM {
-		//downstream GEM ports
-		NumGemPorts := len(tp.DownstreamGemPortAttributeList)
-		GemPorts := make([]*tp_pb.TrafficQueue, 0)
-		for Count := 0; Count < NumGemPorts; Count++ {
-			if tp.DownstreamGemPortAttributeList[Count].AesEncryption == "True" {
-				encryp = true
-			} else {
-				encryp = false
-			}
-			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
-				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
-				GemportId:     tp.DownstreamGemPortAttributeList[Count].GemportID,
-				PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
-				AesEncryption: encryp,
-				SchedPolicy:   tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
-				Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
-				Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
-				DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
-			})
-		}
-		log.Debugw("Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
-		return GemPorts
-	}
-	return nil
-}
-
-func (tpm *TechProfileMgr) GetUsTrafficScheduler(tp *TechProfile) *tp_pb.TrafficScheduler {
-	UsScheduler := tpm.GetUsScheduler(tp)
-
-	return &tp_pb.TrafficScheduler{Direction: UsScheduler.Direction,
-		AllocId:   tp.UsScheduler.AllocID,
-		Scheduler: UsScheduler}
-}
-
-func (t *TechProfileMgr) GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32 {
-	/*
-	   Function to get the Gemport ID mapped to a pbit.
-	*/
-	if Dir == tp_pb.Direction_UPSTREAM {
-		// upstream GEM ports
-		NumGemPorts := len(tp.UpstreamGemPortAttributeList)
-		for Count := 0; Count < NumGemPorts; Count++ {
-			NumPbitMaps := len(tp.UpstreamGemPortAttributeList[Count].PbitMap)
-			for ICount := 2; ICount < NumPbitMaps; ICount++ {
-				if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[Count].PbitMap[ICount])); err == nil {
-					if uint32(ICount-2) == pbit && p == 1 { // Check this p-bit is set
-						log.Debugw("Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[Count].GemportID})
-						return tp.UpstreamGemPortAttributeList[Count].GemportID
-					}
-				}
-			}
-		}
-	} else if Dir == tp_pb.Direction_DOWNSTREAM {
-		//downstream GEM ports
-		NumGemPorts := len(tp.DownstreamGemPortAttributeList)
-		for Count := 0; Count < NumGemPorts; Count++ {
-			NumPbitMaps := len(tp.DownstreamGemPortAttributeList[Count].PbitMap)
-			for ICount := 2; ICount < NumPbitMaps; ICount++ {
-				if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[Count].PbitMap[ICount])); err == nil {
-					if uint32(ICount-2) == pbit && p == 1 { // Check this p-bit is set
-						log.Debugw("Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[Count].GemportID})
-						return tp.DownstreamGemPortAttributeList[Count].GemportID
-					}
-				}
-			}
-		}
-	}
-	log.Errorw("No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
-	return 0
-}
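NewTechProfile and CreateTechProfInstance above are driven by a PON resource manager that hands out alloc IDs and GEM port IDs. A sketch of wiring them together with a stand-in resource manager; the import path, the dummy IDs, and the UNI port name are illustrative assumptions, and a real adapter would use ponresourcemanager and a reachable etcd:

package main

import (
	"fmt"

	tp "github.com/opencord/voltha-lib-go/pkg/techprofile" // assumed post-move import path
)

// fakeResourceMgr is a hypothetical stand-in satisfying the resource manager
// interface expected by NewTechProfile (GetResourceID, GetResourceTypeAllocID,
// GetResourceTypeGemPortID, GetTechnology).
type fakeResourceMgr struct{}

func (f *fakeResourceMgr) GetResourceID(intfID uint32, resourceType string, numIDs uint32) ([]uint32, error) {
	// Hand out sequential dummy IDs; a real manager allocates from KV-backed pools.
	ids := make([]uint32, numIDs)
	for i := range ids {
		ids[i] = 1024 + uint32(i)
	}
	return ids, nil
}
func (f *fakeResourceMgr) GetResourceTypeAllocID() string   { return "ALLOC_ID" }
func (f *fakeResourceMgr) GetResourceTypeGemPortID() string { return "GEMPORT_ID" }
func (f *fakeResourceMgr) GetTechnology() string            { return "xgspon" }

func main() {
	// Requires a reachable etcd at 127.0.0.1:2379.
	mgr, err := tp.NewTechProfile(&fakeResourceMgr{}, "etcd", "127.0.0.1", 2379)
	if err != nil {
		panic(err)
	}
	// Instantiate table 64 for one UNI on PON interface 0; the instance is also
	// persisted under service/voltha/technology_profiles/xgspon/64/<uni>.
	instance := mgr.CreateTechProfInstance(64, "olt-1/pon-0/onu-1/uni-0", 0)
	if instance == nil {
		panic("failed to create tech profile instance")
	}
	fmt.Println(instance.NumGemPorts, instance.UsScheduler.AllocID)
}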
diff --git a/common/techprofile/tech_profile_if.go b/common/techprofile/tech_profile_if.go
deleted file mode 100644
index 1ed38db..0000000
--- a/common/techprofile/tech_profile_if.go
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright 2019-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package techprofile
-
-import (
-	"github.com/opencord/voltha-go/db/model"
-	tp_pb "github.com/opencord/voltha-protos/go/tech_profile"
-)
-
-type TechProfileIf interface {
-	SetKVClient() *model.Backend
-	GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string
-	GetTPInstanceFromKVStore(techProfiletblID uint32, path string) (*TechProfile, error)
-	CreateTechProfInstance(techProfiletblID uint32, uniPortName string, intfId uint32) *TechProfile
-	DeleteTechProfileInstance(techProfiletblID uint32, uniPortName string) error
-	GetprotoBufParamValue(paramType string, paramKey string) int32
-	GetUsScheduler(tpInstance *TechProfile) *tp_pb.SchedulerConfig
-	GetDsScheduler(tpInstance *TechProfile) *tp_pb.SchedulerConfig
-	GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
-		ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
-	GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) []*tp_pb.TrafficQueue
-	GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32
-}
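TechProfileIf mirrors the exported method set of TechProfileMgr. A one-line compile-time sketch of that relationship (not present in the original sources), placed inside the techprofile package:

package techprofile

// Assert that the concrete manager satisfies the interface above.
var _ TechProfileIf = (*TechProfileMgr)(nil)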
diff --git a/db/kvstore/kvutils_test.go b/db/kvstore/kvutils_test.go
deleted file mode 100644
index 86c4369..0000000
--- a/db/kvstore/kvutils_test.go
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package kvstore
-
-import (
-	"github.com/stretchr/testify/assert"
-	"testing"
-	"time"
-)
-
-func TestDurationWithNegativeTimeout(t *testing.T) {
-	actualResult := GetDuration(-1)
-	var expectedResult = defaultKVGetTimeout * time.Second
-
-	assert.Equal(t, expectedResult, actualResult)
-}
-
-func TestDurationWithZeroTimeout(t *testing.T) {
-	actualResult := GetDuration(0)
-	var expectedResult = defaultKVGetTimeout * time.Second
-
-	assert.Equal(t, expectedResult, actualResult)
-}
-
-func TestDurationWithTimeout(t *testing.T) {
-	actualResult := GetDuration(10)
-	var expectedResult = time.Duration(10) * time.Second
-
-	assert.Equal(t, expectedResult, actualResult)
-}
-
-func TestToStringWithString(t *testing.T) {
-	actualResult, _ := ToString("myString")
-	var expectedResult = "myString"
-
-	assert.Equal(t, expectedResult, actualResult)
-}
-
-func TestToStringWithEmpty(t *testing.T) {
-	actualResult, _ := ToString("")
-	var expectedResult = ""
-
-	assert.Equal(t, expectedResult, actualResult)
-}
-
-func TestToStringWithByte(t *testing.T) {
-	mByte := []byte("Hello")
-	actualResult, _ := ToString(mByte)
-	var expectedResult = "Hello"
-
-	assert.Equal(t, expectedResult, actualResult)
-}
-
-func TestToStringWithEmptyByte(t *testing.T) {
-	mByte := []byte("")
-	actualResult, _ := ToString(mByte)
-	var expectedResult = ""
-
-	assert.Equal(t, expectedResult, actualResult)
-}
-
-func TestToStringForErrorCase(t *testing.T) {
-	mInt := 200
-	actualResult, err := ToString(mInt)
-	var expectedResult = ""
-
-	assert.Equal(t, expectedResult, actualResult)
-	assert.NotNil(t, err)
-}
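The deleted tests document the two kvstore helpers: GetDuration falls back to the package default for non-positive timeouts, and ToString accepts strings or byte slices and errors on anything else. A brief sketch, assuming the helpers now live under voltha-lib-go (the import path is an assumption):

package main

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/pkg/db/kvstore" // assumed post-move import path
)

func main() {
	// Non-positive timeouts resolve to the package default duration.
	timeout := kvstore.GetDuration(0)

	// Byte slices are converted; unsupported types return an error.
	s, err := kvstore.ToString([]byte("hello"))
	fmt.Println(timeout, s, err)
}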
diff --git a/db/model/base_test.go b/db/model/base_test.go
deleted file mode 100644
index 623d24b..0000000
--- a/db/model/base_test.go
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package model
-
-import (
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-protos/go/voltha"
-	"runtime/debug"
-	"sync"
-)
-
-type ModelTestConfig struct {
-	Root      *root
-	Backend   *Backend
-	RootProxy *Proxy
-	DbPrefix  string
-	DbType    string
-	DbHost    string
-	DbPort    int
-	DbTimeout int
-}
-
-var callbackMutex sync.Mutex
-
-func commonChanCallback(args ...interface{}) interface{} {
-	log.Infof("Running common callback - arg count: %d", len(args))
-
-	//for i := 0; i < len(args); i++ {
-	//	log.Infof("ARG %d : %+v", i, args[i])
-	//}
-
-	callbackMutex.Lock()
-	defer callbackMutex.Unlock()
-
-	execDoneChan := args[1].(*chan struct{})
-
-	// Inform the caller that the callback was executed
-	if *execDoneChan != nil {
-		log.Infof("Sending completion indication - stack:%s", string(debug.Stack()))
-		close(*execDoneChan)
-		*execDoneChan = nil
-	}
-
-	return nil
-}
-
-func commonCallback2(args ...interface{}) interface{} {
-	log.Infof("Running common2 callback - arg count: %d %+v", len(args), args)
-
-	return nil
-}
-
-func commonCallbackFunc(args ...interface{}) interface{} {
-	log.Infof("Running common callback - arg count: %d", len(args))
-
-	for i := 0; i < len(args); i++ {
-		log.Infof("ARG %d : %+v", i, args[i])
-	}
-	execStatusFunc := args[1].(func(bool))
-
-	// Inform the caller that the callback was executed
-	execStatusFunc(true)
-
-	return nil
-}
-
-func firstCallback(args ...interface{}) interface{} {
-	name := args[0]
-	id := args[1]
-	log.Infof("Running first callback - name: %s, id: %s\n", name, id)
-	return nil
-}
-
-func secondCallback(args ...interface{}) interface{} {
-	name := args[0].(map[string]string)
-	id := args[1]
-	log.Infof("Running second callback - name: %s, id: %f\n", name["name"], id)
-	// FIXME: the panic call seem to interfere with the logging mechanism
-	//panic("Generating a panic in second callback")
-	return nil
-}
-
-func thirdCallback(args ...interface{}) interface{} {
-	name := args[0]
-	id := args[1].(*voltha.Device)
-	log.Infof("Running third callback - name: %+v, id: %s\n", name, id.Id)
-	return nil
-}
diff --git a/db/model/branch_test.go b/db/model/branch_test.go
deleted file mode 100644
index cf8406c..0000000
--- a/db/model/branch_test.go
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package model
-
-import (
-	"crypto/md5"
-	"fmt"
-	"testing"
-)
-
-var (
-	TestBranch_BRANCH *Branch
-	TestBranch_HASH   string
-)
-
-// Create a new branch and ensure that fields are populated
-func TestBranch_NewBranch(t *testing.T) {
-	node := &node{}
-	hash := fmt.Sprintf("%x", md5.Sum([]byte("origin_hash")))
-	origin := &NonPersistedRevision{
-		Config:   &DataRevision{},
-		Children: make(map[string][]Revision),
-		Hash:     hash,
-		Branch:   &Branch{},
-	}
-	txid := fmt.Sprintf("%x", md5.Sum([]byte("branch_transaction_id")))
-
-	TestBranch_BRANCH = NewBranch(node, txid, origin, true)
-	t.Logf("New Branch(txid:%s) created: %+v\n", txid, TestBranch_BRANCH)
-
-	if TestBranch_BRANCH.Latest == nil {
-		t.Errorf("Branch latest pointer is nil")
-	} else if TestBranch_BRANCH.Origin == nil {
-		t.Errorf("Branch origin pointer is nil")
-	} else if TestBranch_BRANCH.Node == nil {
-		t.Errorf("Branch node pointer is nil")
-	} else if TestBranch_BRANCH.Revisions == nil {
-		t.Errorf("Branch revisions map is nil")
-	} else if TestBranch_BRANCH.Txid == "" {
-		t.Errorf("Branch transaction id is empty")
-	}
-}
-
-// Add a new revision to the branch
-func TestBranch_AddRevision(t *testing.T) {
-	TestBranch_HASH = fmt.Sprintf("%x", md5.Sum([]byte("revision_hash")))
-	rev := &NonPersistedRevision{
-		Config:   &DataRevision{},
-		Children: make(map[string][]Revision),
-		Hash:     TestBranch_HASH,
-		Branch:   &Branch{},
-	}
-
-	TestBranch_BRANCH.AddRevision(rev)
-	t.Logf("Added revision: %+v\n", rev)
-
-	if len(TestBranch_BRANCH.Revisions) == 0 {
-		t.Errorf("Branch revisions map is empty")
-	}
-}
-
-// Ensure that the added revision can be retrieved
-func TestBranch_GetRevision(t *testing.T) {
-	if rev := TestBranch_BRANCH.GetRevision(TestBranch_HASH); rev == nil {
-		t.Errorf("Unable to retrieve revision for hash:%s", TestBranch_HASH)
-	} else {
-		t.Logf("Got revision for hash:%s rev:%+v\n", TestBranch_HASH, rev)
-	}
-}
-
-// Set the added revision as the latest
-func TestBranch_LatestRevision(t *testing.T) {
-	addedRevision := TestBranch_BRANCH.GetRevision(TestBranch_HASH)
-	TestBranch_BRANCH.SetLatest(addedRevision)
-
-	rev := TestBranch_BRANCH.GetLatest()
-	t.Logf("Retrieved latest revision :%+v", rev)
-
-	if rev == nil {
-		t.Error("Unable to retrieve latest revision")
-	} else if rev.GetHash() != TestBranch_HASH {
-		t.Errorf("Latest revision does not match hash: %s", TestBranch_HASH)
-	}
-}
-
-// Ensure that the origin revision remains and differs from subsequent revisions
-func TestBranch_OriginRevision(t *testing.T) {
-	rev := TestBranch_BRANCH.Origin
-	t.Logf("Retrieved origin revision :%+v", rev)
-
-	if rev == nil {
-		t.Error("Unable to retrieve origin revision")
-	} else if rev.GetHash() == TestBranch_HASH {
-		t.Errorf("Origin revision should differ from added revision: %s", TestBranch_HASH)
-	}
-}
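
The branch tests above walk one lifecycle: create a branch from an origin revision, add a revision, promote it to latest, and read it back. A condensed in-package sketch of that flow, assuming NewBranch, AddRevision, SetLatest and GetLatest keep the signatures shown here after the move to voltha-lib-go (types such as node are unexported, so this only compiles inside the model package):

package model

import (
	"crypto/md5"
	"fmt"
	"testing"
)

// TestBranchLifecycleSketch is illustrative only: it condenses the branch
// lifecycle covered by the deleted tests into a single case.
func TestBranchLifecycleSketch(t *testing.T) {
	origin := &NonPersistedRevision{
		Config:   &DataRevision{},
		Children: make(map[string][]Revision),
		Hash:     fmt.Sprintf("%x", md5.Sum([]byte("origin"))),
		Branch:   &Branch{},
	}
	branch := NewBranch(&node{}, "txid-sketch", origin, true)

	rev := &NonPersistedRevision{
		Config:   &DataRevision{},
		Children: make(map[string][]Revision),
		Hash:     fmt.Sprintf("%x", md5.Sum([]byte("rev-1"))),
		Branch:   branch,
	}
	branch.AddRevision(rev)
	branch.SetLatest(rev)

	if latest := branch.GetLatest(); latest == nil || latest.GetHash() != rev.Hash {
		t.Fatalf("expected %s as the latest revision, got %+v", rev.Hash, latest)
	}
}
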
diff --git a/db/model/child_type_test.go b/db/model/child_type_test.go
deleted file mode 100644
index 4725975..0000000
--- a/db/model/child_type_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package model
-
-import (
-	"github.com/opencord/voltha-protos/go/voltha"
-	"reflect"
-	"testing"
-)
-
-// Dissect a proto message by extracting all the children fields
-func TestChildType_01_Device_Proto_ChildrenFields(t *testing.T) {
-	var cls *voltha.Device
-
-	t.Logf("Extracting children fields from proto type: %s", reflect.TypeOf(cls))
-	names := ChildrenFields(cls)
-	t.Logf("Extracting children field names: %+v", names)
-
-	expectedKeys := []string{"ports", "flows", "flow_groups", "image_downloads", "pm_configs"}
-	for _, key := range expectedKeys {
-		if _, exists := names[key]; !exists {
-			t.Errorf("Missing key:%s from class type:%s", key, reflect.TypeOf(cls))
-		}
-	}
-}
-
-// Verify that the cache contains an entry for types on which ChildrenFields was performed
-func TestChildType_02_Cache_Keys(t *testing.T) {
-	if _, exists := getChildTypes().Cache[reflect.TypeOf(&voltha.Device{}).String()]; !exists {
-		t.Errorf("getChildTypeCache().Cache should have an entry of type: %+v\n", reflect.TypeOf(&voltha.Device{}).String())
-	}
-	for k := range getChildTypes().Cache {
-		t.Logf("getChildTypeCache().Cache Key:%+v\n", k)
-	}
-}
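
ChildrenFields is the entry point these tests cover: it dissects a proto message type, returns the fields that hold child objects, and caches the result per type. A minimal sketch of calling it from outside the package, assuming it stays exported under the db/model import path this change introduces elsewhere:

package main

import (
	"fmt"

	// Post-move package path, matching the imports added later in this diff.
	"github.com/opencord/voltha-lib-go/pkg/db/model"
	"github.com/opencord/voltha-protos/go/voltha"
)

func main() {
	// Extract the child-holding fields of voltha.Device (ports, flows, flow_groups, ...).
	fields := model.ChildrenFields(&voltha.Device{})
	for name := range fields {
		fmt.Println("child field:", name)
	}
}
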
diff --git a/db/model/node_test.go b/db/model/node_test.go
deleted file mode 100644
index 240e918..0000000
--- a/db/model/node_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package model
-
-import (
-	"crypto/md5"
-	"fmt"
-	"github.com/golang/protobuf/ptypes/any"
-	"github.com/opencord/voltha-protos/go/common"
-	"github.com/opencord/voltha-protos/go/openflow_13"
-	"github.com/opencord/voltha-protos/go/voltha"
-	"reflect"
-	"testing"
-)
-
-var (
-	TestNode_Port = []*voltha.Port{
-		{
-			PortNo:     123,
-			Label:      "test-etcd_port-0",
-			Type:       voltha.Port_PON_OLT,
-			AdminState: common.AdminState_ENABLED,
-			OperStatus: common.OperStatus_ACTIVE,
-			DeviceId:   "etcd_port-0-device-id",
-			Peers:      []*voltha.Port_PeerPort{},
-		},
-	}
-
-	TestNode_Device = &voltha.Device{
-		Id:              "Config-SomeNode-01-new-test",
-		Type:            "simulated_olt",
-		Root:            true,
-		ParentId:        "",
-		ParentPortNo:    0,
-		Vendor:          "voltha-test",
-		Model:           "GetLatest-voltha-simulated-olt",
-		HardwareVersion: "1.0.0",
-		FirmwareVersion: "1.0.0",
-		Images:          &voltha.Images{},
-		SerialNumber:    "abcdef-123456",
-		VendorId:        "DEADBEEF-INC",
-		Adapter:         "simulated_olt",
-		Vlan:            1234,
-		Address:         &voltha.Device_HostAndPort{HostAndPort: "1.2.3.4:5555"},
-		ExtraArgs:       "",
-		ProxyAddress:    &voltha.Device_ProxyAddress{},
-		AdminState:      voltha.AdminState_PREPROVISIONED,
-		OperStatus:      common.OperStatus_ACTIVE,
-		Reason:          "",
-		ConnectStatus:   common.ConnectStatus_REACHABLE,
-		Custom:          &any.Any{},
-		Ports:           TestNode_Port,
-		Flows:           &openflow_13.Flows{},
-		FlowGroups:      &openflow_13.FlowGroups{},
-		PmConfigs:       &voltha.PmConfigs{},
-		ImageDownloads:  []*voltha.ImageDownload{},
-	}
-
-	TestNode_Data = TestNode_Device
-
-	TestNode_Txid = fmt.Sprintf("%x", md5.Sum([]byte("node_transaction_id")))
-	TestNode_Root = &root{RevisionClass: reflect.TypeOf(NonPersistedRevision{})}
-)
-
-// Exercise node creation code
-// This test will create a new node and verify its branch, latest revision and config data
-func TestNode_01_NewNode(t *testing.T) {
-	node := NewNode(TestNode_Root, TestNode_Data, false, TestNode_Txid)
-
-	if reflect.ValueOf(node.Type).Type() != reflect.TypeOf(TestNode_Data) {
-		t.Errorf("Node type does not match original data type: %+v", reflect.ValueOf(node.Type).Type())
-	} else if node.GetBranch(TestNode_Txid) == nil || node.GetBranch(TestNode_Txid).Latest == nil {
-		t.Errorf("No branch associated to txid: %s", TestNode_Txid)
-	} else if node.GetBranch(TestNode_Txid).Latest == nil {
-		t.Errorf("Branch has no latest revision : %s", TestNode_Txid)
-	} else if node.GetBranch(TestNode_Txid).GetLatest().GetConfig() == nil {
-		t.Errorf("Latest revision has no assigned data: %+v", node.GetBranch(TestNode_Txid).GetLatest())
-	}
-
-	t.Logf("Created new node successfully : %+v\n", node)
-}
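
Node construction itself is pinned by a single check: NewNode attaches the initial data to a branch keyed by the transaction id. A trimmed in-package sketch of that assertion, under the assumption that NewNode and GetBranch are unchanged in the library (root and node are unexported, so this only compiles inside the model package):

package model

import (
	"reflect"
	"testing"

	"github.com/opencord/voltha-protos/go/voltha"
)

// TestNewNodeSketch is illustrative only: it mirrors the essential assertions
// of the deleted node test.
func TestNewNodeSketch(t *testing.T) {
	data := &voltha.Device{Id: "sketch-device"}
	txid := "txid-sketch"
	n := NewNode(&root{RevisionClass: reflect.TypeOf(NonPersistedRevision{})}, data, false, txid)

	branch := n.GetBranch(txid)
	if branch == nil || branch.GetLatest() == nil || branch.GetLatest().GetConfig() == nil {
		t.Fatalf("expected a branch with a latest revision and config for txid %s", txid)
	}
}
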
diff --git a/db/model/proxy_load_test.go b/db/model/proxy_load_test.go
deleted file mode 100644
index f4fd325..0000000
--- a/db/model/proxy_load_test.go
+++ /dev/null
@@ -1,332 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package model
-
-import (
-	"context"
-	"encoding/hex"
-	"github.com/google/uuid"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-protos/go/common"
-	"github.com/opencord/voltha-protos/go/openflow_13"
-	"github.com/opencord/voltha-protos/go/voltha"
-	"math/rand"
-	"reflect"
-	"strconv"
-	"sync"
-	"testing"
-)
-
-var (
-	BenchmarkProxy_Root        *root
-	BenchmarkProxy_DeviceProxy *Proxy
-	BenchmarkProxy_PLT         *proxyLoadTest
-	BenchmarkProxy_Logger      log.Logger
-)
-
-type proxyLoadChanges struct {
-	ID     string
-	Before interface{}
-	After  interface{}
-}
-type proxyLoadTest struct {
-	mutex sync.RWMutex
-
-	addMutex     sync.RWMutex
-	addedDevices []string
-
-	firmwareMutex    sync.RWMutex
-	updatedFirmwares []proxyLoadChanges
-	flowMutex        sync.RWMutex
-	updatedFlows     []proxyLoadChanges
-
-	preAddExecuted     bool
-	postAddExecuted    bool
-	preUpdateExecuted  bool
-	postUpdateExecuted bool
-}
-
-func (plt *proxyLoadTest) SetPreAddExecuted(status bool) {
-	plt.mutex.Lock()
-	defer plt.mutex.Unlock()
-	plt.preAddExecuted = status
-}
-func (plt *proxyLoadTest) SetPostAddExecuted(status bool) {
-	plt.mutex.Lock()
-	defer plt.mutex.Unlock()
-	plt.postAddExecuted = status
-}
-func (plt *proxyLoadTest) SetPreUpdateExecuted(status bool) {
-	plt.mutex.Lock()
-	defer plt.mutex.Unlock()
-	plt.preUpdateExecuted = status
-}
-func (plt *proxyLoadTest) SetPostUpdateExecuted(status bool) {
-	plt.mutex.Lock()
-	defer plt.mutex.Unlock()
-	plt.postUpdateExecuted = status
-}
-
-func init() {
-	BenchmarkProxy_Root = NewRoot(&voltha.Voltha{}, nil)
-
-	BenchmarkProxy_Logger, _ = log.AddPackage(log.JSON, log.DebugLevel, log.Fields{"instanceId": "PLT"})
-	//log.UpdateAllLoggers(log.Fields{"instanceId": "PROXY_LOAD_TEST"})
-	//Setup default logger - applies for packages that do not have specific logger set
-	if _, err := log.SetDefaultLogger(log.JSON, log.DebugLevel, log.Fields{"instanceId": "PLT"}); err != nil {
-		log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
-	}
-
-	// Update all loggers (provisioned via init) with a common field
-	if err := log.UpdateAllLoggers(log.Fields{"instanceId": "PLT"}); err != nil {
-		log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
-	}
-	log.SetPackageLogLevel("github.com/opencord/voltha-go/db/model", log.DebugLevel)
-
-	BenchmarkProxy_DeviceProxy = BenchmarkProxy_Root.node.CreateProxy(context.Background(), "/", false)
-	// Register ADD instructions callbacks
-	BenchmarkProxy_PLT = &proxyLoadTest{}
-
-	BenchmarkProxy_DeviceProxy.RegisterCallback(PRE_ADD, commonCallbackFunc, "PRE_ADD", BenchmarkProxy_PLT.SetPreAddExecuted)
-	BenchmarkProxy_DeviceProxy.RegisterCallback(POST_ADD, commonCallbackFunc, "POST_ADD", BenchmarkProxy_PLT.SetPostAddExecuted)
-
-	//// Register UPDATE instructions callbacks
-	BenchmarkProxy_DeviceProxy.RegisterCallback(PRE_UPDATE, commonCallbackFunc, "PRE_UPDATE", BenchmarkProxy_PLT.SetPreUpdateExecuted)
-	BenchmarkProxy_DeviceProxy.RegisterCallback(POST_UPDATE, commonCallbackFunc, "POST_UPDATE", BenchmarkProxy_PLT.SetPostUpdateExecuted)
-
-}
-
-func BenchmarkProxy_AddDevice(b *testing.B) {
-	defer GetProfiling().Report()
-	b.RunParallel(func(pb *testing.PB) {
-		b.Log("Started adding devices")
-		for pb.Next() {
-			ltPorts := []*voltha.Port{
-				{
-					PortNo:     123,
-					Label:      "lt-port-0",
-					Type:       voltha.Port_PON_OLT,
-					AdminState: common.AdminState_ENABLED,
-					OperStatus: common.OperStatus_ACTIVE,
-					DeviceId:   "lt-port-0-device-id",
-					Peers:      []*voltha.Port_PeerPort{},
-				},
-			}
-
-			ltStats := &openflow_13.OfpFlowStats{
-				Id: 1000,
-			}
-			ltFlows := &openflow_13.Flows{
-				Items: []*openflow_13.OfpFlowStats{ltStats},
-			}
-			ltDevice := &voltha.Device{
-				Id:         "",
-				Type:       "simulated_olt",
-				Address:    &voltha.Device_HostAndPort{HostAndPort: "1.2.3.4:5555"},
-				AdminState: voltha.AdminState_PREPROVISIONED,
-				Flows:      ltFlows,
-				Ports:      ltPorts,
-			}
-
-			ltDevIDBin, _ := uuid.New().MarshalBinary()
-			ltDevID := "0001" + hex.EncodeToString(ltDevIDBin)[:12]
-			ltDevice.Id = ltDevID
-
-			BenchmarkProxy_PLT.SetPreAddExecuted(false)
-			BenchmarkProxy_PLT.SetPostAddExecuted(false)
-
-			var added interface{}
-			// Add the device
-			if added = BenchmarkProxy_DeviceProxy.AddWithID(context.Background(), "/devices", ltDevID, ltDevice, ""); added == nil {
-				BenchmarkProxy_Logger.Errorf("Failed to add device: %+v", ltDevice)
-				continue
-			} else {
-				BenchmarkProxy_Logger.Infof("Device was added 1: %+v", added)
-			}
-
-			BenchmarkProxy_PLT.addMutex.Lock()
-			BenchmarkProxy_PLT.addedDevices = append(BenchmarkProxy_PLT.addedDevices, added.(*voltha.Device).Id)
-			BenchmarkProxy_PLT.addMutex.Unlock()
-		}
-	})
-
-	BenchmarkProxy_Logger.Infof("Number of added devices : %d", len(BenchmarkProxy_PLT.addedDevices))
-}
-
-func BenchmarkProxy_UpdateFirmware(b *testing.B) {
-	b.RunParallel(func(pb *testing.PB) {
-		for pb.Next() {
-			//for i:=0; i < b.N; i++ {
-
-			if len(BenchmarkProxy_PLT.addedDevices) > 0 {
-				var target interface{}
-				randomID := BenchmarkProxy_PLT.addedDevices[rand.Intn(len(BenchmarkProxy_PLT.addedDevices))]
-				firmProxy := BenchmarkProxy_Root.node.CreateProxy(context.Background(), "/", false)
-				if target = firmProxy.Get(context.Background(), "/devices/"+randomID, 0, false,
-					""); !reflect.ValueOf(target).IsValid() {
-					BenchmarkProxy_Logger.Errorf("Failed to find device: %s %+v", randomID, target)
-					continue
-				}
-
-				BenchmarkProxy_PLT.SetPreUpdateExecuted(false)
-				BenchmarkProxy_PLT.SetPostUpdateExecuted(false)
-				firmProxy.RegisterCallback(PRE_UPDATE, commonCallbackFunc, "PRE_UPDATE", BenchmarkProxy_PLT.SetPreUpdateExecuted)
-				firmProxy.RegisterCallback(POST_UPDATE, commonCallbackFunc, "POST_UPDATE", BenchmarkProxy_PLT.SetPostUpdateExecuted)
-
-				var fwVersion int
-
-				before := target.(*voltha.Device).FirmwareVersion
-				if target.(*voltha.Device).FirmwareVersion == "n/a" {
-					fwVersion = 0
-				} else {
-					fwVersion, _ = strconv.Atoi(target.(*voltha.Device).FirmwareVersion)
-					fwVersion++
-				}
-
-				target.(*voltha.Device).FirmwareVersion = strconv.Itoa(fwVersion)
-				after := target.(*voltha.Device).FirmwareVersion
-
-				var updated interface{}
-				if updated = firmProxy.Update(context.Background(), "/devices/"+randomID, target.(*voltha.Device), false,
-					""); updated == nil {
-					BenchmarkProxy_Logger.Errorf("Failed to update device: %+v", target)
-					continue
-				} else {
-					BenchmarkProxy_Logger.Infof("Device was updated : %+v", updated)
-
-				}
-
-				if d := firmProxy.Get(context.Background(), "/devices/"+randomID, 0, false,
-					""); !reflect.ValueOf(d).IsValid() {
-					BenchmarkProxy_Logger.Errorf("Failed to get device: %s", randomID)
-					continue
-				} else if d.(*voltha.Device).FirmwareVersion == after {
-					BenchmarkProxy_Logger.Infof("Imm Device was updated with new value: %s %+v", randomID, d)
-				} else if d.(*voltha.Device).FirmwareVersion == before {
-					BenchmarkProxy_Logger.Errorf("Imm Device kept old value: %s %+v %+v", randomID, d, target)
-				} else {
-					BenchmarkProxy_Logger.Errorf("Imm Device has unknown value: %s %+v %+v", randomID, d, target)
-				}
-
-				BenchmarkProxy_PLT.firmwareMutex.Lock()
-
-				BenchmarkProxy_PLT.updatedFirmwares = append(
-					BenchmarkProxy_PLT.updatedFirmwares,
-					proxyLoadChanges{ID: randomID, Before: before, After: after},
-				)
-				BenchmarkProxy_PLT.firmwareMutex.Unlock()
-			}
-		}
-	})
-}
-
-func traverseBranches(revision Revision, depth int) {
-	if revision == nil {
-		return
-	}
-	prefix := strconv.Itoa(depth) + " ~~~~ "
-	for i := 0; i < depth; i++ {
-		prefix += "  "
-	}
-
-	BenchmarkProxy_Logger.Debugf("%sRevision: %s %+v", prefix, revision.GetHash(), revision.GetData())
-
-	//for brIdx, brRev := range revision.GetBranch().Revisions {
-	//	BenchmarkProxy_Logger.Debugf("%sbranchIndex: %s", prefix, brIdx)
-	//	traverseBranches(brRev, depth+1)
-	//}
-	for childrenI, children := range revision.GetAllChildren() {
-		BenchmarkProxy_Logger.Debugf("%schildrenIndex: %s, length: %d", prefix, childrenI, len(children))
-
-		for _, subrev := range children {
-			//subrev.GetBranch().Latest
-			traverseBranches(subrev, depth+1)
-		}
-	}
-
-}
-func BenchmarkProxy_UpdateFlows(b *testing.B) {
-	b.RunParallel(func(pb *testing.PB) {
-		for pb.Next() {
-			if len(BenchmarkProxy_PLT.addedDevices) > 0 {
-				randomID := BenchmarkProxy_PLT.addedDevices[rand.Intn(len(BenchmarkProxy_PLT.addedDevices))]
-
-				flowsProxy := BenchmarkProxy_Root.node.CreateProxy(context.Background(), "/devices/"+randomID+"/flows", false)
-				flows := flowsProxy.Get(context.Background(), "/", 0, false, "")
-
-				before := flows.(*openflow_13.Flows).Items[0].TableId
-				flows.(*openflow_13.Flows).Items[0].TableId = uint32(rand.Intn(3000))
-				after := flows.(*openflow_13.Flows).Items[0].TableId
-
-				flowsProxy.RegisterCallback(
-					PRE_UPDATE,
-					commonCallback2,
-				)
-				flowsProxy.RegisterCallback(
-					POST_UPDATE,
-					commonCallback2,
-				)
-
-				var updated interface{}
-				if updated = flowsProxy.Update(context.Background(), "/", flows.(*openflow_13.Flows), false, ""); updated == nil {
-					b.Errorf("Failed to update flows for device: %+v", flows)
-				} else {
-					BenchmarkProxy_Logger.Infof("Flows were updated : %+v", updated)
-				}
-				BenchmarkProxy_PLT.flowMutex.Lock()
-				BenchmarkProxy_PLT.updatedFlows = append(
-					BenchmarkProxy_PLT.updatedFlows,
-					proxyLoadChanges{ID: randomID, Before: before, After: after},
-				)
-				BenchmarkProxy_PLT.flowMutex.Unlock()
-			}
-		}
-	})
-}
-
-func BenchmarkProxy_GetDevices(b *testing.B) {
-	//traverseBranches(BenchmarkProxy_DeviceProxy.Root.node.Branches[NONE].GetLatest(), 0)
-
-	for i := 0; i < len(BenchmarkProxy_PLT.addedDevices); i++ {
-		devToGet := BenchmarkProxy_PLT.addedDevices[i]
-		// Verify that the added device can now be retrieved
-		if d := BenchmarkProxy_DeviceProxy.Get(context.Background(), "/devices/"+devToGet, 0, false,
-			""); !reflect.ValueOf(d).IsValid() {
-			BenchmarkProxy_Logger.Errorf("Failed to get device: %s", devToGet)
-			continue
-		} else {
-			BenchmarkProxy_Logger.Infof("Got device: %s %+v", devToGet, d)
-		}
-	}
-}
-
-func BenchmarkProxy_GetUpdatedFirmware(b *testing.B) {
-	for i := 0; i < len(BenchmarkProxy_PLT.updatedFirmwares); i++ {
-		devToGet := BenchmarkProxy_PLT.updatedFirmwares[i].ID
-		// Verify that the updated device can be retrieved and that the updates were actually applied
-		if d := BenchmarkProxy_DeviceProxy.Get(context.Background(), "/devices/"+devToGet, 0, false,
-			""); !reflect.ValueOf(d).IsValid() {
-			BenchmarkProxy_Logger.Errorf("Failed to get device: %s", devToGet)
-			continue
-		} else if d.(*voltha.Device).FirmwareVersion == BenchmarkProxy_PLT.updatedFirmwares[i].After.(string) {
-			BenchmarkProxy_Logger.Infof("Device was updated with new value: %s %+v", devToGet, d)
-		} else if d.(*voltha.Device).FirmwareVersion == BenchmarkProxy_PLT.updatedFirmwares[i].Before.(string) {
-			BenchmarkProxy_Logger.Errorf("Device kept old value: %s %+v %+v", devToGet, d, BenchmarkProxy_PLT.updatedFirmwares[i])
-		} else {
-			BenchmarkProxy_Logger.Errorf("Device has unknown value: %s %+v %+v", devToGet, d, BenchmarkProxy_PLT.updatedFirmwares[i])
-		}
-	}
-}
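
The load test drives the proxy through its bulk path: every device is inserted with AddWithID under /devices and later mutated through a fresh proxy rooted at "/". A small sketch of that add path outside the benchmark harness, assuming NewRoot, CreateProxy and AddWithID keep the signatures used above once vendored from voltha-lib-go:

package main

import (
	"context"
	"encoding/hex"
	"fmt"

	"github.com/google/uuid"
	"github.com/opencord/voltha-lib-go/pkg/db/model"
	"github.com/opencord/voltha-protos/go/voltha"
)

func main() {
	ctx := context.Background()

	// A root backed by no KV store (nil backend), as in the deleted tests.
	root := model.NewRoot(&voltha.Voltha{}, nil)
	proxy := root.CreateProxy(ctx, "/", false)

	// Generate an id with the same scheme the benchmark uses.
	idBin, _ := uuid.New().MarshalBinary()
	id := "0001" + hex.EncodeToString(idBin)[:12]
	device := &voltha.Device{Id: id, Type: "simulated_olt"}

	if added := proxy.AddWithID(ctx, "/devices", id, device, ""); added == nil {
		fmt.Println("failed to add device", id)
		return
	}
	fmt.Println("added device", id)
}
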
diff --git a/db/model/proxy_test.go b/db/model/proxy_test.go
deleted file mode 100644
index 3f65997..0000000
--- a/db/model/proxy_test.go
+++ /dev/null
@@ -1,661 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package model
-
-import (
-	"context"
-	"encoding/hex"
-	"encoding/json"
-	"github.com/golang/protobuf/proto"
-	"github.com/google/uuid"
-	"github.com/opencord/voltha-protos/go/common"
-	"github.com/opencord/voltha-protos/go/openflow_13"
-	"github.com/opencord/voltha-protos/go/voltha"
-	"math/rand"
-	"reflect"
-	"strconv"
-	"testing"
-	"time"
-)
-
-var (
-	TestProxy_Root                  *root
-	TestProxy_Root_LogicalDevice    *Proxy
-	TestProxy_Root_Device           *Proxy
-	TestProxy_Root_Adapter          *Proxy
-	TestProxy_DeviceId              string
-	TestProxy_AdapterId             string
-	TestProxy_LogicalDeviceId       string
-	TestProxy_TargetDeviceId        string
-	TestProxy_TargetLogicalDeviceId string
-	TestProxy_LogicalPorts          []*voltha.LogicalPort
-	TestProxy_Ports                 []*voltha.Port
-	TestProxy_Stats                 *openflow_13.OfpFlowStats
-	TestProxy_Flows                 *openflow_13.Flows
-	TestProxy_Device                *voltha.Device
-	TestProxy_LogicalDevice         *voltha.LogicalDevice
-	TestProxy_Adapter               *voltha.Adapter
-)
-
-func init() {
-	//log.AddPackage(log.JSON, log.InfoLevel, log.Fields{"instanceId": "DB_MODEL"})
-	//log.UpdateAllLoggers(log.Fields{"instanceId": "PROXY_LOAD_TEST"})
-	TestProxy_Root = NewRoot(&voltha.Voltha{}, nil)
-	TestProxy_Root_LogicalDevice = TestProxy_Root.CreateProxy(context.Background(), "/", false)
-	TestProxy_Root_Device = TestProxy_Root.CreateProxy(context.Background(), "/", false)
-	TestProxy_Root_Adapter = TestProxy_Root.CreateProxy(context.Background(), "/", false)
-
-	TestProxy_LogicalPorts = []*voltha.LogicalPort{
-		{
-			Id:           "123",
-			DeviceId:     "logicalport-0-device-id",
-			DevicePortNo: 123,
-			RootPort:     false,
-		},
-	}
-	TestProxy_Ports = []*voltha.Port{
-		{
-			PortNo:     123,
-			Label:      "test-port-0",
-			Type:       voltha.Port_PON_OLT,
-			AdminState: common.AdminState_ENABLED,
-			OperStatus: common.OperStatus_ACTIVE,
-			DeviceId:   "etcd_port-0-device-id",
-			Peers:      []*voltha.Port_PeerPort{},
-		},
-	}
-
-	TestProxy_Stats = &openflow_13.OfpFlowStats{
-		Id: 1111,
-	}
-	TestProxy_Flows = &openflow_13.Flows{
-		Items: []*openflow_13.OfpFlowStats{TestProxy_Stats},
-	}
-	TestProxy_Device = &voltha.Device{
-		Id:         TestProxy_DeviceId,
-		Type:       "simulated_olt",
-		Address:    &voltha.Device_HostAndPort{HostAndPort: "1.2.3.4:5555"},
-		AdminState: voltha.AdminState_PREPROVISIONED,
-		Flows:      TestProxy_Flows,
-		Ports:      TestProxy_Ports,
-	}
-
-	TestProxy_LogicalDevice = &voltha.LogicalDevice{
-		Id:         TestProxy_DeviceId,
-		DatapathId: 0,
-		Ports:      TestProxy_LogicalPorts,
-		Flows:      TestProxy_Flows,
-	}
-
-	TestProxy_Adapter = &voltha.Adapter{
-		Id:      TestProxy_AdapterId,
-		Vendor:  "test-adapter-vendor",
-		Version: "test-adapter-version",
-	}
-}
-
-func TestProxy_1_1_1_Add_NewDevice(t *testing.T) {
-	devIDBin, _ := uuid.New().MarshalBinary()
-	TestProxy_DeviceId = "0001" + hex.EncodeToString(devIDBin)[:12]
-	TestProxy_Device.Id = TestProxy_DeviceId
-
-	preAddExecuted := make(chan struct{})
-	postAddExecuted := make(chan struct{})
-	preAddExecutedPtr, postAddExecutedPtr := preAddExecuted, postAddExecuted
-
-	devicesProxy := TestProxy_Root.node.CreateProxy(context.Background(), "/devices", false)
-	devicesProxy.RegisterCallback(PRE_ADD, commonCallback2, "PRE_ADD Device container changes")
-	devicesProxy.RegisterCallback(POST_ADD, commonCallback2, "POST_ADD Device container changes")
-
-	// Register ADD instructions callbacks
-	TestProxy_Root_Device.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions", &preAddExecutedPtr)
-	TestProxy_Root_Device.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions", &postAddExecutedPtr)
-
-	if added := TestProxy_Root_Device.Add(context.Background(), "/devices", TestProxy_Device, ""); added == nil {
-		t.Error("Failed to add device")
-	} else {
-		t.Logf("Added device : %+v", added)
-	}
-
-	if !verifyGotResponse(preAddExecuted) {
-		t.Error("PRE_ADD callback was not executed")
-	}
-	if !verifyGotResponse(postAddExecuted) {
-		t.Error("POST_ADD callback was not executed")
-	}
-
-	// Verify that the added device can now be retrieved
-	if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_DeviceId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
-		t.Error("Failed to find added device")
-	} else {
-		djson, _ := json.Marshal(d)
-		t.Logf("Found device: %s", string(djson))
-	}
-}
-
-func TestProxy_1_1_2_Add_ExistingDevice(t *testing.T) {
-	TestProxy_Device.Id = TestProxy_DeviceId
-
-	added := TestProxy_Root_Device.Add(context.Background(), "/devices", TestProxy_Device, "")
-	if added.(proto.Message).String() != reflect.ValueOf(TestProxy_Device).Interface().(proto.Message).String() {
-		t.Errorf("Devices don't match - existing: %+v returned: %+v", TestProxy_LogicalDevice, added)
-	}
-}
-
-func verifyGotResponse(callbackIndicator <-chan struct{}) bool {
-	timeout := time.After(1 * time.Second)
-	// Wait until the channel closes, or we time out
-	select {
-	case <-callbackIndicator:
-		// Received response successfully
-		return true
-
-	case <-timeout:
-		// Got a timeout! fail with a timeout error
-		return false
-	}
-}
-
-func TestProxy_1_1_3_Add_NewAdapter(t *testing.T) {
-	TestProxy_AdapterId = "test-adapter"
-	TestProxy_Adapter.Id = TestProxy_AdapterId
-	preAddExecuted := make(chan struct{})
-	postAddExecuted := make(chan struct{})
-	preAddExecutedPtr, postAddExecutedPtr := preAddExecuted, postAddExecuted
-
-	// Register ADD instructions callbacks
-	TestProxy_Root_Adapter.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions for adapters", &preAddExecutedPtr)
-	TestProxy_Root_Adapter.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions for adapters", &postAddExecutedPtr)
-
-	// Add the adapter
-	if added := TestProxy_Root_Adapter.Add(context.Background(), "/adapters", TestProxy_Adapter, ""); added == nil {
-		t.Error("Failed to add adapter")
-	} else {
-		t.Logf("Added adapter : %+v", added)
-	}
-
-	verifyGotResponse(postAddExecuted)
-
-	// Verify that the added device can now be retrieved
-	if d := TestProxy_Root_Adapter.Get(context.Background(), "/adapters/"+TestProxy_AdapterId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
-		t.Error("Failed to find added adapter")
-	} else {
-		djson, _ := json.Marshal(d)
-		t.Logf("Found adapter: %s", string(djson))
-	}
-
-	if !verifyGotResponse(preAddExecuted) {
-		t.Error("PRE_ADD callback was not executed")
-	}
-	if !verifyGotResponse(postAddExecuted) {
-		t.Error("POST_ADD callback was not executed")
-	}
-}
-
-func TestProxy_1_2_1_Get_AllDevices(t *testing.T) {
-	devices := TestProxy_Root_Device.Get(context.Background(), "/devices", 1, false, "")
-
-	if len(devices.([]interface{})) == 0 {
-		t.Error("there are no available devices to retrieve")
-	} else {
-		// Save the target device id for later tests
-		TestProxy_TargetDeviceId = devices.([]interface{})[0].(*voltha.Device).Id
-		t.Logf("retrieved all devices: %+v", devices)
-	}
-}
-
-func TestProxy_1_2_2_Get_SingleDevice(t *testing.T) {
-	if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_TargetDeviceId, 0, false, ""); !reflect.ValueOf(d).IsValid() {
-		t.Errorf("Failed to find device : %s", TestProxy_TargetDeviceId)
-	} else {
-		djson, _ := json.Marshal(d)
-		t.Logf("Found device: %s", string(djson))
-	}
-}
-
-func TestProxy_1_3_1_Update_Device(t *testing.T) {
-	var fwVersion int
-
-	preUpdateExecuted := make(chan struct{})
-	postUpdateExecuted := make(chan struct{})
-	preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
-
-	if retrieved := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_TargetDeviceId, 1, false, ""); retrieved == nil {
-		t.Error("Failed to get device")
-	} else {
-		t.Logf("Found raw device (root proxy): %+v", retrieved)
-
-		if retrieved.(*voltha.Device).FirmwareVersion == "n/a" {
-			fwVersion = 0
-		} else {
-			fwVersion, _ = strconv.Atoi(retrieved.(*voltha.Device).FirmwareVersion)
-			fwVersion++
-		}
-
-		retrieved.(*voltha.Device).FirmwareVersion = strconv.Itoa(fwVersion)
-
-		TestProxy_Root_Device.RegisterCallback(
-			PRE_UPDATE,
-			commonChanCallback,
-			"PRE_UPDATE instructions (root proxy)", &preUpdateExecutedPtr,
-		)
-		TestProxy_Root_Device.RegisterCallback(
-			POST_UPDATE,
-			commonChanCallback,
-			"POST_UPDATE instructions (root proxy)", &postUpdateExecutedPtr,
-		)
-
-		if afterUpdate := TestProxy_Root_Device.Update(context.Background(), "/devices/"+TestProxy_TargetDeviceId, retrieved, false, ""); afterUpdate == nil {
-			t.Error("Failed to update device")
-		} else {
-			t.Logf("Updated device : %+v", afterUpdate)
-		}
-
-		if !verifyGotResponse(preUpdateExecuted) {
-			t.Error("PRE_UPDATE callback was not executed")
-		}
-		if !verifyGotResponse(postUpdateExecuted) {
-			t.Error("POST_UPDATE callback was not executed")
-		}
-
-		if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_TargetDeviceId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
-			t.Error("Failed to find updated device (root proxy)")
-		} else {
-			djson, _ := json.Marshal(d)
-			t.Logf("Found device (root proxy): %s raw: %+v", string(djson), d)
-		}
-	}
-}
-
-func TestProxy_1_3_2_Update_DeviceFlows(t *testing.T) {
-	// Get a device proxy and update a specific port
-	devFlowsProxy := TestProxy_Root.node.CreateProxy(context.Background(), "/devices/"+TestProxy_DeviceId+"/flows", false)
-	flows := devFlowsProxy.Get(context.Background(), "/", 0, false, "")
-	flows.(*openflow_13.Flows).Items[0].TableId = 2244
-
-	preUpdateExecuted := make(chan struct{})
-	postUpdateExecuted := make(chan struct{})
-	preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
-
-	devFlowsProxy.RegisterCallback(
-		PRE_UPDATE,
-		commonChanCallback,
-		"PRE_UPDATE instructions (flows proxy)", &preUpdateExecutedPtr,
-	)
-	devFlowsProxy.RegisterCallback(
-		POST_UPDATE,
-		commonChanCallback,
-		"POST_UPDATE instructions (flows proxy)", &postUpdateExecutedPtr,
-	)
-
-	kvFlows := devFlowsProxy.Get(context.Background(), "/", 0, false, "")
-
-	if reflect.DeepEqual(flows, kvFlows) {
-		t.Errorf("Local changes have changed the KV store contents -  local:%+v, kv: %+v", flows, kvFlows)
-	}
-
-	if updated := devFlowsProxy.Update(context.Background(), "/", flows.(*openflow_13.Flows), false, ""); updated == nil {
-		t.Error("Failed to update flow")
-	} else {
-		t.Logf("Updated flows : %+v", updated)
-	}
-
-	if !verifyGotResponse(preUpdateExecuted) {
-		t.Error("PRE_UPDATE callback was not executed")
-	}
-	if !verifyGotResponse(postUpdateExecuted) {
-		t.Error("POST_UPDATE callback was not executed")
-	}
-
-	if d := devFlowsProxy.Get(context.Background(), "/", 0, false, ""); d == nil {
-		t.Error("Failed to find updated flows (flows proxy)")
-	} else {
-		djson, _ := json.Marshal(d)
-		t.Logf("Found flows (flows proxy): %s", string(djson))
-	}
-
-	if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_DeviceId+"/flows", 1, false, ""); !reflect.ValueOf(d).IsValid() {
-		t.Error("Failed to find updated flows (root proxy)")
-	} else {
-		djson, _ := json.Marshal(d)
-		t.Logf("Found flows (root proxy): %s", string(djson))
-	}
-}
-
-func TestProxy_1_3_3_Update_Adapter(t *testing.T) {
-	preUpdateExecuted := make(chan struct{})
-	postUpdateExecuted := make(chan struct{})
-	preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
-
-	adaptersProxy := TestProxy_Root.node.CreateProxy(context.Background(), "/adapters", false)
-
-	if retrieved := TestProxy_Root_Adapter.Get(context.Background(), "/adapters/"+TestProxy_AdapterId, 1, false, ""); retrieved == nil {
-		t.Error("Failed to get adapter")
-	} else {
-		t.Logf("Found raw adapter (root proxy): %+v", retrieved)
-
-		retrieved.(*voltha.Adapter).Version = "test-adapter-version-2"
-
-		adaptersProxy.RegisterCallback(
-			PRE_UPDATE,
-			commonChanCallback,
-			"PRE_UPDATE instructions for adapters", &preUpdateExecutedPtr,
-		)
-		adaptersProxy.RegisterCallback(
-			POST_UPDATE,
-			commonChanCallback,
-			"POST_UPDATE instructions for adapters", &postUpdateExecutedPtr,
-		)
-
-		if afterUpdate := adaptersProxy.Update(context.Background(), "/"+TestProxy_AdapterId, retrieved, false, ""); afterUpdate == nil {
-			t.Error("Failed to update adapter")
-		} else {
-			t.Logf("Updated adapter : %+v", afterUpdate)
-		}
-
-		if !verifyGotResponse(preUpdateExecuted) {
-			t.Error("PRE_UPDATE callback for adapter was not executed")
-		}
-		if !verifyGotResponse(postUpdateExecuted) {
-			t.Error("POST_UPDATE callback for adapter was not executed")
-		}
-
-		if d := TestProxy_Root_Adapter.Get(context.Background(), "/adapters/"+TestProxy_AdapterId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
-			t.Error("Failed to find updated adapter (root proxy)")
-		} else {
-			djson, _ := json.Marshal(d)
-			t.Logf("Found adapter (root proxy): %s raw: %+v", string(djson), d)
-		}
-	}
-}
-
-func TestProxy_1_4_1_Remove_Device(t *testing.T) {
-	preRemoveExecuted := make(chan struct{})
-	postRemoveExecuted := make(chan struct{})
-	preRemoveExecutedPtr, postRemoveExecutedPtr := preRemoveExecuted, postRemoveExecuted
-
-	TestProxy_Root_Device.RegisterCallback(
-		PRE_REMOVE,
-		commonChanCallback,
-		"PRE_REMOVE instructions (root proxy)", &preRemoveExecutedPtr,
-	)
-	TestProxy_Root_Device.RegisterCallback(
-		POST_REMOVE,
-		commonChanCallback,
-		"POST_REMOVE instructions (root proxy)", &postRemoveExecutedPtr,
-	)
-
-	if removed := TestProxy_Root_Device.Remove(context.Background(), "/devices/"+TestProxy_DeviceId, ""); removed == nil {
-		t.Error("Failed to remove device")
-	} else {
-		t.Logf("Removed device : %+v", removed)
-	}
-
-	if !verifyGotResponse(preRemoveExecuted) {
-		t.Error("PRE_REMOVE callback was not executed")
-	}
-	if !verifyGotResponse(postRemoveExecuted) {
-		t.Error("POST_REMOVE callback was not executed")
-	}
-
-	if d := TestProxy_Root_Device.Get(context.Background(), "/devices/"+TestProxy_DeviceId, 0, false, ""); reflect.ValueOf(d).IsValid() {
-		djson, _ := json.Marshal(d)
-		t.Errorf("Device was not removed - %s", djson)
-	} else {
-		t.Logf("Device was removed: %s", TestProxy_DeviceId)
-	}
-}
-
-func TestProxy_2_1_1_Add_NewLogicalDevice(t *testing.T) {
-
-	ldIDBin, _ := uuid.New().MarshalBinary()
-	TestProxy_LogicalDeviceId = "0001" + hex.EncodeToString(ldIDBin)[:12]
-	TestProxy_LogicalDevice.Id = TestProxy_LogicalDeviceId
-
-	preAddExecuted := make(chan struct{})
-	postAddExecuted := make(chan struct{})
-	preAddExecutedPtr, postAddExecutedPtr := preAddExecuted, postAddExecuted
-
-	// Register
-	TestProxy_Root_LogicalDevice.RegisterCallback(PRE_ADD, commonChanCallback, "PRE_ADD instructions", &preAddExecutedPtr)
-	TestProxy_Root_LogicalDevice.RegisterCallback(POST_ADD, commonChanCallback, "POST_ADD instructions", &postAddExecutedPtr)
-
-	if added := TestProxy_Root_LogicalDevice.Add(context.Background(), "/logical_devices", TestProxy_LogicalDevice, ""); added == nil {
-		t.Error("Failed to add logical device")
-	} else {
-		t.Logf("Added logical device : %+v", added)
-	}
-
-	verifyGotResponse(postAddExecuted)
-
-	if ld := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, ""); !reflect.ValueOf(ld).IsValid() {
-		t.Error("Failed to find added logical device")
-	} else {
-		ldJSON, _ := json.Marshal(ld)
-		t.Logf("Found logical device: %s", string(ldJSON))
-	}
-
-	if !verifyGotResponse(preAddExecuted) {
-		t.Error("PRE_ADD callback was not executed")
-	}
-	if !verifyGotResponse(postAddExecuted) {
-		t.Error("POST_ADD callback was not executed")
-	}
-}
-
-func TestProxy_2_1_2_Add_ExistingLogicalDevice(t *testing.T) {
-	TestProxy_LogicalDevice.Id = TestProxy_LogicalDeviceId
-
-	added := TestProxy_Root_LogicalDevice.Add(context.Background(), "/logical_devices", TestProxy_LogicalDevice, "")
-	if added.(proto.Message).String() != reflect.ValueOf(TestProxy_LogicalDevice).Interface().(proto.Message).String() {
-		t.Errorf("Logical devices don't match - existing: %+v returned: %+v", TestProxy_LogicalDevice, added)
-	}
-}
-
-func TestProxy_2_2_1_Get_AllLogicalDevices(t *testing.T) {
-	logicalDevices := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices", 1, false, "")
-
-	if len(logicalDevices.([]interface{})) == 0 {
-		t.Error("there are no available logical devices to retrieve")
-	} else {
-		// Save the target device id for later tests
-		TestProxy_TargetLogicalDeviceId = logicalDevices.([]interface{})[0].(*voltha.LogicalDevice).Id
-		t.Logf("retrieved all logical devices: %+v", logicalDevices)
-	}
-}
-
-func TestProxy_2_2_2_Get_SingleLogicalDevice(t *testing.T) {
-	if ld := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, 0, false, ""); !reflect.ValueOf(ld).IsValid() {
-		t.Errorf("Failed to find logical device : %s", TestProxy_TargetLogicalDeviceId)
-	} else {
-		ldJSON, _ := json.Marshal(ld)
-		t.Logf("Found logical device: %s", string(ldJSON))
-	}
-
-}
-
-func TestProxy_2_3_1_Update_LogicalDevice(t *testing.T) {
-	var fwVersion int
-	preUpdateExecuted := make(chan struct{})
-	postUpdateExecuted := make(chan struct{})
-	preUpdateExecutedPtr, postUpdateExecutedPtr := preUpdateExecuted, postUpdateExecuted
-
-	if retrieved := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, ""); retrieved == nil {
-		t.Error("Failed to get logical device")
-	} else {
-		t.Logf("Found raw logical device (root proxy): %+v", retrieved)
-
-		if retrieved.(*voltha.LogicalDevice).RootDeviceId == "" {
-			fwVersion = 0
-		} else {
-			fwVersion, _ = strconv.Atoi(retrieved.(*voltha.LogicalDevice).RootDeviceId)
-			fwVersion++
-		}
-
-		TestProxy_Root_LogicalDevice.RegisterCallback(
-			PRE_UPDATE,
-			commonChanCallback,
-			"PRE_UPDATE instructions (root proxy)", &preUpdateExecutedPtr,
-		)
-		TestProxy_Root_LogicalDevice.RegisterCallback(
-			POST_UPDATE,
-			commonChanCallback,
-			"POST_UPDATE instructions (root proxy)", &postUpdateExecutedPtr,
-		)
-
-		retrieved.(*voltha.LogicalDevice).RootDeviceId = strconv.Itoa(fwVersion)
-
-		if afterUpdate := TestProxy_Root_LogicalDevice.Update(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, retrieved, false,
-			""); afterUpdate == nil {
-			t.Error("Failed to update logical device")
-		} else {
-			t.Logf("Updated logical device : %+v", afterUpdate)
-		}
-
-		if !verifyGotResponse(preUpdateExecuted) {
-			t.Error("PRE_UPDATE callback was not executed")
-		}
-		if !verifyGotResponse(postUpdateExecuted) {
-			t.Error("POST_UPDATE callback was not executed")
-		}
-
-		if d := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_TargetLogicalDeviceId, 1, false, ""); !reflect.ValueOf(d).IsValid() {
-			t.Error("Failed to find updated logical device (root proxy)")
-		} else {
-			djson, _ := json.Marshal(d)
-
-			t.Logf("Found logical device (root proxy): %s raw: %+v", string(djson), d)
-		}
-	}
-}
-
-func TestProxy_2_3_2_Update_LogicalDeviceFlows(t *testing.T) {
-	// Get a device proxy and update a specific port
-	ldFlowsProxy := TestProxy_Root.node.CreateProxy(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId+"/flows", false)
-	flows := ldFlowsProxy.Get(context.Background(), "/", 0, false, "")
-	flows.(*openflow_13.Flows).Items[0].TableId = rand.Uint32()
-	t.Logf("before updated flows: %+v", flows)
-
-	ldFlowsProxy.RegisterCallback(
-		PRE_UPDATE,
-		commonCallback2,
-	)
-	ldFlowsProxy.RegisterCallback(
-		POST_UPDATE,
-		commonCallback2,
-	)
-
-	kvFlows := ldFlowsProxy.Get(context.Background(), "/", 0, false, "")
-
-	if reflect.DeepEqual(flows, kvFlows) {
-		t.Errorf("Local changes have changed the KV store contents -  local:%+v, kv: %+v", flows, kvFlows)
-	}
-
-	if updated := ldFlowsProxy.Update(context.Background(), "/", flows.(*openflow_13.Flows), false, ""); updated == nil {
-		t.Error("Failed to update logical device flows")
-	} else {
-		t.Logf("Updated logical device flows : %+v", updated)
-	}
-
-	if d := ldFlowsProxy.Get(context.Background(), "/", 0, false, ""); d == nil {
-		t.Error("Failed to find updated logical device flows (flows proxy)")
-	} else {
-		djson, _ := json.Marshal(d)
-		t.Logf("Found flows (flows proxy): %s", string(djson))
-	}
-
-	if d := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId+"/flows", 0, false,
-		""); !reflect.ValueOf(d).IsValid() {
-		t.Error("Failed to find updated logical device flows (root proxy)")
-	} else {
-		djson, _ := json.Marshal(d)
-		t.Logf("Found logical device flows (root proxy): %s", string(djson))
-	}
-}
-
-func TestProxy_2_4_1_Remove_Device(t *testing.T) {
-	preRemoveExecuted := make(chan struct{})
-	postRemoveExecuted := make(chan struct{})
-	preRemoveExecutedPtr, postRemoveExecutedPtr := preRemoveExecuted, postRemoveExecuted
-
-	TestProxy_Root_LogicalDevice.RegisterCallback(
-		PRE_REMOVE,
-		commonChanCallback,
-		"PRE_REMOVE instructions (root proxy)", &preRemoveExecutedPtr,
-	)
-	TestProxy_Root_LogicalDevice.RegisterCallback(
-		POST_REMOVE,
-		commonChanCallback,
-		"POST_REMOVE instructions (root proxy)", &postRemoveExecutedPtr,
-	)
-
-	if removed := TestProxy_Root_LogicalDevice.Remove(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId, ""); removed == nil {
-		t.Error("Failed to remove logical device")
-	} else {
-		t.Logf("Removed device : %+v", removed)
-	}
-
-	if !verifyGotResponse(preRemoveExecuted) {
-		t.Error("PRE_REMOVE callback was not executed")
-	}
-	if !verifyGotResponse(postRemoveExecuted) {
-		t.Error("POST_REMOVE callback was not executed")
-	}
-
-	if d := TestProxy_Root_LogicalDevice.Get(context.Background(), "/logical_devices/"+TestProxy_LogicalDeviceId, 0, false, ""); reflect.ValueOf(d).IsValid() {
-		djson, _ := json.Marshal(d)
-		t.Errorf("Device was not removed - %s", djson)
-	} else {
-		t.Logf("Device was removed: %s", TestProxy_LogicalDeviceId)
-	}
-}
-
-// -----------------------------
-// Callback tests
-// -----------------------------
-
-func TestProxy_Callbacks_1_Register(t *testing.T) {
-	TestProxy_Root_Device.RegisterCallback(PRE_ADD, firstCallback, "abcde", "12345")
-
-	m := make(map[string]string)
-	m["name"] = "fghij"
-	TestProxy_Root_Device.RegisterCallback(PRE_ADD, secondCallback, m, 1.2345)
-
-	d := &voltha.Device{Id: "12345"}
-	TestProxy_Root_Device.RegisterCallback(PRE_ADD, thirdCallback, "klmno", d)
-}
-
-func TestProxy_Callbacks_2_Invoke_WithNoInterruption(t *testing.T) {
-	TestProxy_Root_Device.InvokeCallbacks(PRE_ADD, false, nil)
-}
-
-func TestProxy_Callbacks_3_Invoke_WithInterruption(t *testing.T) {
-	TestProxy_Root_Device.InvokeCallbacks(PRE_ADD, true, nil)
-}
-
-func TestProxy_Callbacks_4_Unregister(t *testing.T) {
-	TestProxy_Root_Device.UnregisterCallback(PRE_ADD, firstCallback)
-	TestProxy_Root_Device.UnregisterCallback(PRE_ADD, secondCallback)
-	TestProxy_Root_Device.UnregisterCallback(PRE_ADD, thirdCallback)
-}
-
-//func TestProxy_Callbacks_5_Add(t *testing.T) {
-//	TestProxy_Root_Device.Root.AddCallback(TestProxy_Root_Device.InvokeCallbacks, POST_UPDATE, false, "some data", "some new data")
-//}
-//
-//func TestProxy_Callbacks_6_Execute(t *testing.T) {
-//	TestProxy_Root_Device.Root.ExecuteCallbacks()
-//}
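
Taken together, the proxy tests cover the CRUD surface (Add, Get, Update, Remove) plus the PRE_*/POST_* callback hooks. A compact sketch of the callback pattern from outside the package, assuming the Proxy methods and callback constants remain exported as shown once they come from voltha-lib-go:

package main

import (
	"context"
	"fmt"

	"github.com/opencord/voltha-lib-go/pkg/db/model"
	"github.com/opencord/voltha-protos/go/voltha"
)

// announce is a minimal callback: it only reports the arguments it receives.
func announce(args ...interface{}) interface{} {
	fmt.Printf("callback fired with %d args: %+v\n", len(args), args)
	return nil
}

func main() {
	ctx := context.Background()
	proxy := model.NewRoot(&voltha.Voltha{}, nil).CreateProxy(ctx, "/", false)

	// Hook the add path, then exercise it.
	proxy.RegisterCallback(model.PRE_ADD, announce, "pre-add sketch")
	proxy.RegisterCallback(model.POST_ADD, announce, "post-add sketch")

	device := &voltha.Device{Id: "sketch-device", Type: "simulated_olt"}
	if added := proxy.Add(ctx, "/devices", device, ""); added == nil {
		fmt.Println("failed to add device")
		return
	}

	if d := proxy.Get(ctx, "/devices/"+device.Id, 0, false, ""); d != nil {
		fmt.Printf("read back: %+v\n", d)
	}

	proxy.UnregisterCallback(model.PRE_ADD, announce)
	proxy.UnregisterCallback(model.POST_ADD, announce)
}
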
diff --git a/db/model/transaction_test.go b/db/model/transaction_test.go
deleted file mode 100644
index 3660a86..0000000
--- a/db/model/transaction_test.go
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package model
-
-import (
-	"context"
-	"encoding/hex"
-	"github.com/google/uuid"
-	"github.com/opencord/voltha-protos/go/common"
-	"github.com/opencord/voltha-protos/go/voltha"
-	"strconv"
-	"testing"
-)
-
-var (
-	TestTransaction_Root           *root
-	TestTransaction_RootProxy      *Proxy
-	TestTransaction_TargetDeviceId string
-	TestTransaction_DeviceId       string
-)
-
-func init() {
-	TestTransaction_Root = NewRoot(&voltha.Voltha{}, nil)
-	TestTransaction_RootProxy = TestTransaction_Root.node.CreateProxy(context.Background(), "/", false)
-}
-
-//func TestTransaction_1_GetDevices(t *testing.T) {
-//	getTx := TestTransaction_RootProxy.OpenTransaction()
-//
-//	devices := getTx.Get("/devices", 1, false)
-//
-//	if len(devices.([]interface{})) == 0 {
-//		t.Error("there are no available devices to retrieve")
-//	} else {
-//		// Save the target device id for later tests
-//		TestTransaction_TargetDeviceId = devices.([]interface{})[0].(*voltha.Device).Id
-//		t.Logf("retrieved devices: %+v", devices)
-//	}
-//
-//	getTx.Commit()
-//}
-
-func TestTransaction_2_AddDevice(t *testing.T) {
-	devIDBin, _ := uuid.New().MarshalBinary()
-	TestTransaction_DeviceId = "0001" + hex.EncodeToString(devIDBin)[:12]
-
-	ports := []*voltha.Port{
-		{
-			PortNo:     123,
-			Label:      "test-port-0",
-			Type:       voltha.Port_PON_OLT,
-			AdminState: common.AdminState_ENABLED,
-			OperStatus: common.OperStatus_ACTIVE,
-			DeviceId:   "etcd_port-0-device-id",
-			Peers:      []*voltha.Port_PeerPort{},
-		},
-	}
-
-	device := &voltha.Device{
-		Id:         TestTransaction_DeviceId,
-		Type:       "simulated_olt",
-		Address:    &voltha.Device_HostAndPort{HostAndPort: "1.2.3.4:5555"},
-		AdminState: voltha.AdminState_PREPROVISIONED,
-		Ports:      ports,
-	}
-
-	addTx := TestTransaction_RootProxy.OpenTransaction()
-
-	if added := addTx.Add(context.Background(), "/devices", device); added == nil {
-		t.Error("Failed to add device")
-	} else {
-		TestTransaction_TargetDeviceId = added.(*voltha.Device).Id
-		t.Logf("Added device : %+v", added)
-	}
-	addTx.Commit()
-}
-
-func TestTransaction_3_GetDevice_PostAdd(t *testing.T) {
-
-	basePath := "/devices/" + TestTransaction_DeviceId
-
-	getDevWithPortsTx := TestTransaction_RootProxy.OpenTransaction()
-	device1 := getDevWithPortsTx.Get(context.Background(), basePath+"/ports", 1, false)
-	t.Logf("retrieved device with ports: %+v", device1)
-	getDevWithPortsTx.Commit()
-
-	getDevTx := TestTransaction_RootProxy.OpenTransaction()
-	device2 := getDevTx.Get(context.Background(), basePath, 0, false)
-	t.Logf("retrieved device: %+v", device2)
-
-	getDevTx.Commit()
-}
-
-func TestTransaction_4_UpdateDevice(t *testing.T) {
-	updateTx := TestTransaction_RootProxy.OpenTransaction()
-	if retrieved := updateTx.Get(context.Background(), "/devices/"+TestTransaction_TargetDeviceId, 1, false); retrieved == nil {
-		t.Error("Failed to get device")
-	} else {
-		var fwVersion int
-		if retrieved.(*voltha.Device).FirmwareVersion == "n/a" {
-			fwVersion = 0
-		} else {
-			fwVersion, _ = strconv.Atoi(retrieved.(*voltha.Device).FirmwareVersion)
-			fwVersion++
-		}
-
-		//cloned := reflect.ValueOf(retrieved).Elem().Interface().(voltha.Device)
-		retrieved.(*voltha.Device).FirmwareVersion = strconv.Itoa(fwVersion)
-		t.Logf("Before update : %+v", retrieved)
-
-		// FIXME: The makeBranch passed in function is nil or not being executed properly!!!!!
-		if afterUpdate := updateTx.Update(context.Background(), "/devices/"+TestTransaction_TargetDeviceId, retrieved, false); afterUpdate == nil {
-			t.Error("Failed to update device")
-		} else {
-			t.Logf("Updated device : %+v", afterUpdate)
-		}
-	}
-	updateTx.Commit()
-}
-
-func TestTransaction_5_GetDevice_PostUpdate(t *testing.T) {
-
-	basePath := "/devices/" + TestTransaction_DeviceId
-
-	getDevWithPortsTx := TestTransaction_RootProxy.OpenTransaction()
-	device1 := getDevWithPortsTx.Get(context.Background(), basePath+"/ports", 1, false)
-	t.Logf("retrieved device with ports: %+v", device1)
-	getDevWithPortsTx.Commit()
-
-	getDevTx := TestTransaction_RootProxy.OpenTransaction()
-	device2 := getDevTx.Get(context.Background(), basePath, 0, false)
-	t.Logf("retrieved device: %+v", device2)
-
-	getDevTx.Commit()
-}
-
-func TestTransaction_6_RemoveDevice(t *testing.T) {
-	removeTx := TestTransaction_RootProxy.OpenTransaction()
-	if removed := removeTx.Remove(context.Background(), "/devices/"+TestTransaction_DeviceId); removed == nil {
-		t.Error("Failed to remove device")
-	} else {
-		t.Logf("Removed device : %+v", removed)
-	}
-	removeTx.Commit()
-}
-
-func TestTransaction_7_GetDevice_PostRemove(t *testing.T) {
-
-	basePath := "/devices/" + TestTransaction_DeviceId
-
-	getDevTx := TestTransaction_RootProxy.OpenTransaction()
-	device := getDevTx.Get(context.Background(), basePath, 0, false)
-	t.Logf("retrieved device: %+v", device)
-
-	getDevTx.Commit()
-}
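
The transaction tests wrap the same CRUD calls in an OpenTransaction/Commit pair. A minimal sketch of that flow, again assuming the transaction API shown above (OpenTransaction, Add, Get, Remove, Commit) is preserved in the vendored voltha-lib-go model package:

package main

import (
	"context"
	"fmt"

	"github.com/opencord/voltha-lib-go/pkg/db/model"
	"github.com/opencord/voltha-protos/go/voltha"
)

func main() {
	ctx := context.Background()
	proxy := model.NewRoot(&voltha.Voltha{}, nil).CreateProxy(ctx, "/", false)
	device := &voltha.Device{Id: "tx-sketch-device", Type: "simulated_olt"}

	// Add inside a transaction, then commit.
	addTx := proxy.OpenTransaction()
	if added := addTx.Add(ctx, "/devices", device); added == nil {
		fmt.Println("failed to add device")
	}
	addTx.Commit()

	// Read it back in a second transaction.
	getTx := proxy.OpenTransaction()
	fmt.Printf("retrieved: %+v\n", getTx.Get(ctx, "/devices/"+device.Id, 0, false))
	getTx.Commit()

	// Remove it in a third.
	rmTx := proxy.OpenTransaction()
	if removed := rmTx.Remove(ctx, "/devices/"+device.Id); removed == nil {
		fmt.Println("failed to remove device")
	}
	rmTx.Commit()
}
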
diff --git a/docker/Dockerfile.ro_core b/docker/Dockerfile.ro_core
index a0fdf1a..15b352d 100644
--- a/docker/Dockerfile.ro_core
+++ b/docker/Dockerfile.ro_core
@@ -36,8 +36,6 @@
 
 # Copy common files.
 COPY common ./common
-COPY db ./db
-COPY kafka ./kafka
 COPY vendor ./vendor
 
 # Copy files.
diff --git a/docker/Dockerfile.rw_core b/docker/Dockerfile.rw_core
index 523d447..32a6238 100644
--- a/docker/Dockerfile.rw_core
+++ b/docker/Dockerfile.rw_core
@@ -36,8 +36,6 @@
 
 # Copy common files.
 COPY common ./common
-COPY db ./db
-COPY kafka ./kafka
 COPY vendor ./vendor
 
 # Copy files
diff --git a/go.mod b/go.mod
index 5640f48..fa6a144 100644
--- a/go.mod
+++ b/go.mod
@@ -3,47 +3,14 @@
 go 1.12
 
 require (
-	github.com/DataDog/zstd v1.4.1 // indirect
-	github.com/Shopify/sarama v1.23.1
-	github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect
 	github.com/bclicn/color v0.0.0-20180711051946-108f2023dc84
-	github.com/boljen/go-bitmap v0.0.0-20151001105940-23cd2fb0ce7d
-	github.com/bsm/sarama-cluster v2.1.15+incompatible
 	github.com/cevaris/ordered_map v0.0.0-20190319150403-3adeae072e73
-	github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a // indirect
-	github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea // indirect
-	github.com/eapache/go-resiliency v1.2.0 // indirect
-	github.com/frankban/quicktest v1.5.0 // indirect
 	github.com/gogo/protobuf v1.3.0
 	github.com/golang/protobuf v1.3.2
 	github.com/google/uuid v1.1.1
 	github.com/gyuho/goraph v0.0.0-20160328020532-d460590d53a9
-	github.com/hashicorp/consul/api v1.2.0
-	github.com/hashicorp/go-immutable-radix v1.1.0 // indirect
-	github.com/hashicorp/go-msgpack v0.5.5 // indirect
-	github.com/hashicorp/go-rootcerts v1.0.1 // indirect
-	github.com/hashicorp/golang-lru v0.5.3 // indirect
-	github.com/hashicorp/memberlist v0.1.5 // indirect
-	github.com/hashicorp/serf v0.8.4 // indirect
-	github.com/jcmturner/gofork v1.0.0 // indirect
-	github.com/onsi/gomega v1.4.2 // indirect
+	github.com/opencord/voltha-lib-go v0.0.0-20191017201200-e73f91e306e9
 	github.com/opencord/voltha-protos v1.0.3
-	github.com/pierrec/lz4 v2.3.0+incompatible // indirect
-	github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
-	github.com/spf13/pflag v1.0.3 // indirect
 	github.com/stretchr/testify v1.4.0
-	go.etcd.io/etcd v0.0.0-20190930204107-236ac2a90522
-	go.uber.org/atomic v1.4.0 // indirect
-	go.uber.org/multierr v1.2.0 // indirect
-	go.uber.org/zap v1.10.0
-	golang.org/x/crypto v0.0.0-20191001170739-f9e2070545dc // indirect
-	golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3 // indirect
-	golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24 // indirect
-	golang.org/x/text v0.3.2 // indirect
-	google.golang.org/appengine v1.4.0 // indirect
-	google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c // indirect
 	google.golang.org/grpc v1.24.0
-	gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect
-	gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect
-	gopkg.in/yaml.v2 v2.2.3 // indirect
 )
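
With this change the module depends on a single shared library instead of carrying its own common, db and kafka packages, so application code pulls those facilities straight from voltha-lib-go. A small sketch of the logging bootstrap against the new import path, using only calls that appear in the code this diff touches:

package main

import (
	"github.com/opencord/voltha-lib-go/pkg/log"
)

func main() {
	// Set a JSON default logger and tag every package logger with an instance id,
	// the same bootstrap sequence used by the moved packages.
	if _, err := log.SetDefaultLogger(log.JSON, log.DebugLevel, log.Fields{"instanceId": "example"}); err != nil {
		log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
	}
	if err := log.UpdateAllLoggers(log.Fields{"instanceId": "example"}); err != nil {
		log.With(log.Fields{"error": err}).Fatal("Cannot setup logging")
	}
	log.Infof("logger ready for %s", "example")
}
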
diff --git a/go.sum b/go.sum
index e1c3833..60cb314 100644
--- a/go.sum
+++ b/go.sum
@@ -192,6 +192,8 @@
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I=
 github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/opencord/voltha-lib-go v0.0.0-20191017201200-e73f91e306e9 h1:/CA9esQ/L41vhvwCIDI+cLjrawry5+1fQWt/O91KFXU=
+github.com/opencord/voltha-lib-go v0.0.0-20191017201200-e73f91e306e9/go.mod h1:+bjwfm5bbP1j6liscpn3UFqbh6hHDkmLDWU3AdYLDY4=
 github.com/opencord/voltha-protos v1.0.3 h1:9v+R/QGF1xK+HKTqFM0IqCABoGCAxC8iKH4VzNBJDto=
 github.com/opencord/voltha-protos v1.0.3/go.mod h1:myfFIkJdA+rCXmKdLImhh79MfabN4ZOKQ4grk32DnPQ=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
diff --git a/kafka/kafka_inter_container_library_test.go b/kafka/kafka_inter_container_library_test.go
deleted file mode 100644
index 790425e..0000000
--- a/kafka/kafka_inter_container_library_test.go
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package kafka
-
-import (
-	"github.com/stretchr/testify/assert"
-	"testing"
-)
-
-func TestDefaultKafkaProxy(t *testing.T) {
-	actualResult, error := NewInterContainerProxy()
-	assert.Equal(t, error, nil)
-	assert.Equal(t, actualResult.kafkaHost, DefaultKafkaHost)
-	assert.Equal(t, actualResult.kafkaPort, DefaultKafkaPort)
-	assert.Equal(t, actualResult.defaultRequestHandlerInterface, interface{}(nil))
-}
-
-func TestKafkaProxyOptionHost(t *testing.T) {
-	actualResult, error := NewInterContainerProxy(InterContainerHost("10.20.30.40"))
-	assert.Equal(t, error, nil)
-	assert.Equal(t, actualResult.kafkaHost, "10.20.30.40")
-	assert.Equal(t, actualResult.kafkaPort, DefaultKafkaPort)
-	assert.Equal(t, actualResult.defaultRequestHandlerInterface, interface{}(nil))
-}
-
-func TestKafkaProxyOptionPort(t *testing.T) {
-	actualResult, error := NewInterContainerProxy(InterContainerPort(1020))
-	assert.Equal(t, error, nil)
-	assert.Equal(t, actualResult.kafkaHost, DefaultKafkaHost)
-	assert.Equal(t, actualResult.kafkaPort, 1020)
-	assert.Equal(t, actualResult.defaultRequestHandlerInterface, interface{}(nil))
-}
-
-func TestKafkaProxyOptionTopic(t *testing.T) {
-	actualResult, error := NewInterContainerProxy(DefaultTopic(&Topic{Name: "Adapter"}))
-	assert.Equal(t, error, nil)
-	assert.Equal(t, actualResult.kafkaHost, DefaultKafkaHost)
-	assert.Equal(t, actualResult.kafkaPort, DefaultKafkaPort)
-	assert.Equal(t, actualResult.defaultRequestHandlerInterface, interface{}(nil))
-	assert.Equal(t, actualResult.DefaultTopic.Name, "Adapter")
-}
-
-type myInterface struct {
-}
-
-func (m *myInterface) doSomething() {
-}
-
-func TestKafkaProxyOptionTargetInterface(t *testing.T) {
-	var m *myInterface
-	actualResult, error := NewInterContainerProxy(RequestHandlerInterface(m))
-	assert.Equal(t, error, nil)
-	assert.Equal(t, actualResult.kafkaHost, DefaultKafkaHost)
-	assert.Equal(t, actualResult.kafkaPort, DefaultKafkaPort)
-	assert.Equal(t, actualResult.defaultRequestHandlerInterface, m)
-}
-
-func TestKafkaProxyChangeAllOptions(t *testing.T) {
-	var m *myInterface
-	actualResult, error := NewInterContainerProxy(
-		InterContainerHost("10.20.30.40"),
-		InterContainerPort(1020),
-		DefaultTopic(&Topic{Name: "Adapter"}),
-		RequestHandlerInterface(m))
-	assert.Equal(t, error, nil)
-	assert.Equal(t, actualResult.kafkaHost, "10.20.30.40")
-	assert.Equal(t, actualResult.kafkaPort, 1020)
-	assert.Equal(t, actualResult.defaultRequestHandlerInterface, m)
-	assert.Equal(t, actualResult.DefaultTopic.Name, "Adapter")
-}
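
The deleted proxy tests move to voltha-lib-go together with the kafka package itself. Against the new import path, the construction pattern they exercised looks roughly like the sketch below; the option names and literal values come from the removed tests, while the *kafka.InterContainerProxy return type is assumed from the pre-move signature.

    package example

    import "github.com/opencord/voltha-lib-go/pkg/kafka"

    // newProxy mirrors the options covered by the removed tests: host, port and
    // default topic. The literal values are illustrative only.
    func newProxy() (*kafka.InterContainerProxy, error) {
        return kafka.NewInterContainerProxy(
            kafka.InterContainerHost("10.20.30.40"),
            kafka.InterContainerPort(1020),
            kafka.DefaultTopic(&kafka.Topic{Name: "Adapter"}),
        )
    }
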
diff --git a/ro_core/config/config.go b/ro_core/config/config.go
index 28cde01..376ed37 100644
--- a/ro_core/config/config.go
+++ b/ro_core/config/config.go
@@ -18,7 +18,7 @@
 import (
 	"flag"
 	"fmt"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"os"
 )
 
diff --git a/ro_core/core/core.go b/ro_core/core/core.go
index 8b0317b..493edb2 100644
--- a/ro_core/core/core.go
+++ b/ro_core/core/core.go
@@ -17,12 +17,12 @@
 
 import (
 	"context"
-	grpcserver "github.com/opencord/voltha-go/common/grpc"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/common/probe"
-	"github.com/opencord/voltha-go/db/kvstore"
-	"github.com/opencord/voltha-go/db/model"
 	"github.com/opencord/voltha-go/ro_core/config"
+	"github.com/opencord/voltha-lib-go/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	grpcserver "github.com/opencord/voltha-lib-go/pkg/grpc"
+	"github.com/opencord/voltha-lib-go/pkg/log"
+	"github.com/opencord/voltha-lib-go/pkg/probe"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"google.golang.org/grpc"
 )
diff --git a/ro_core/core/device_agent.go b/ro_core/core/device_agent.go
index a64931c..89d7d94 100644
--- a/ro_core/core/device_agent.go
+++ b/ro_core/core/device_agent.go
@@ -18,8 +18,8 @@
 import (
 	"context"
 	"github.com/gogo/protobuf/proto"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
diff --git a/ro_core/core/device_manager.go b/ro_core/core/device_manager.go
index c42eee3..550382a 100644
--- a/ro_core/core/device_manager.go
+++ b/ro_core/core/device_manager.go
@@ -17,9 +17,9 @@
 
 import (
 	"context"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/common/probe"
-	"github.com/opencord/voltha-go/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/log"
+	"github.com/opencord/voltha-lib-go/pkg/probe"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
diff --git a/ro_core/core/grpc_nbi_api_handler.go b/ro_core/core/grpc_nbi_api_handler.go
index c68b230..b77133d 100644
--- a/ro_core/core/grpc_nbi_api_handler.go
+++ b/ro_core/core/grpc_nbi_api_handler.go
@@ -20,7 +20,7 @@
 	"errors"
 	"github.com/golang/protobuf/ptypes/empty"
 	da "github.com/opencord/voltha-go/common/core/northbound/grpc"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/common"
 	"github.com/opencord/voltha-protos/go/omci"
 	"github.com/opencord/voltha-protos/go/openflow_13"
diff --git a/ro_core/core/logical_device_agent.go b/ro_core/core/logical_device_agent.go
index d1c8887..f4b3c82 100644
--- a/ro_core/core/logical_device_agent.go
+++ b/ro_core/core/logical_device_agent.go
@@ -18,8 +18,8 @@
 import (
 	"context"
 	"github.com/gogo/protobuf/proto"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
diff --git a/ro_core/core/logical_device_manager.go b/ro_core/core/logical_device_manager.go
index 215a406..87e889d 100644
--- a/ro_core/core/logical_device_manager.go
+++ b/ro_core/core/logical_device_manager.go
@@ -17,9 +17,9 @@
 
 import (
 	"context"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/common/probe"
-	"github.com/opencord/voltha-go/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/log"
+	"github.com/opencord/voltha-lib-go/pkg/probe"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
diff --git a/ro_core/core/model_proxy.go b/ro_core/core/model_proxy.go
index 473e579..da0871b 100644
--- a/ro_core/core/model_proxy.go
+++ b/ro_core/core/model_proxy.go
@@ -17,8 +17,8 @@
 
 import (
 	"context"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"strings"
diff --git a/ro_core/core/model_proxy_manager.go b/ro_core/core/model_proxy_manager.go
index 5613475..e6be4b3 100644
--- a/ro_core/core/model_proxy_manager.go
+++ b/ro_core/core/model_proxy_manager.go
@@ -18,9 +18,9 @@
 import (
 	"context"
 	"encoding/json"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/common/version"
-	"github.com/opencord/voltha-go/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/log"
+	"github.com/opencord/voltha-lib-go/pkg/version"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
diff --git a/ro_core/main.go b/ro_core/main.go
index a8508b8..71f5d66 100644
--- a/ro_core/main.go
+++ b/ro_core/main.go
@@ -19,13 +19,13 @@
 	"context"
 	"errors"
 	"fmt"
-	grpcserver "github.com/opencord/voltha-go/common/grpc"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/common/probe"
-	"github.com/opencord/voltha-go/common/version"
-	"github.com/opencord/voltha-go/db/kvstore"
 	"github.com/opencord/voltha-go/ro_core/config"
 	c "github.com/opencord/voltha-go/ro_core/core"
+	"github.com/opencord/voltha-lib-go/pkg/db/kvstore"
+	grpcserver "github.com/opencord/voltha-lib-go/pkg/grpc"
+	"github.com/opencord/voltha-lib-go/pkg/log"
+	"github.com/opencord/voltha-lib-go/pkg/probe"
+	"github.com/opencord/voltha-lib-go/pkg/version"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	"os"
 	"os/signal"
diff --git a/rw_core/config/config.go b/rw_core/config/config.go
index 5f4a0e0..deae0f3 100644
--- a/rw_core/config/config.go
+++ b/rw_core/config/config.go
@@ -18,7 +18,7 @@
 import (
 	"flag"
 	"fmt"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 )
 
 // RW Core service default constants
diff --git a/rw_core/core/adapter_manager.go b/rw_core/core/adapter_manager.go
index b67487f..5f2423e 100644
--- a/rw_core/core/adapter_manager.go
+++ b/rw_core/core/adapter_manager.go
@@ -20,9 +20,9 @@
 	"errors"
 	"fmt"
 	"github.com/gogo/protobuf/proto"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/common/probe"
-	"github.com/opencord/voltha-go/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/log"
+	"github.com/opencord/voltha-lib-go/pkg/probe"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"reflect"
 	"sync"
diff --git a/rw_core/core/adapter_proxy.go b/rw_core/core/adapter_proxy.go
index 9511b9d..57fa197 100755
--- a/rw_core/core/adapter_proxy.go
+++ b/rw_core/core/adapter_proxy.go
@@ -19,8 +19,8 @@
 	"context"
 	"github.com/golang/protobuf/ptypes"
 	a "github.com/golang/protobuf/ptypes/any"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	"github.com/opencord/voltha-protos/go/openflow_13"
 	"github.com/opencord/voltha-protos/go/voltha"
diff --git a/rw_core/core/adapter_request_handler.go b/rw_core/core/adapter_request_handler.go
index 5247247..5e76e27 100644
--- a/rw_core/core/adapter_request_handler.go
+++ b/rw_core/core/adapter_request_handler.go
@@ -19,10 +19,10 @@
 	"errors"
 	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/empty"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/model"
-	"github.com/opencord/voltha-go/kafka"
 	"github.com/opencord/voltha-go/rw_core/utils"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"google.golang.org/grpc/codes"
diff --git a/rw_core/core/core.go b/rw_core/core/core.go
index f22f8b8..438d645 100644
--- a/rw_core/core/core.go
+++ b/rw_core/core/core.go
@@ -17,13 +17,13 @@
 
 import (
 	"context"
-	grpcserver "github.com/opencord/voltha-go/common/grpc"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/common/probe"
-	"github.com/opencord/voltha-go/db/kvstore"
-	"github.com/opencord/voltha-go/db/model"
-	"github.com/opencord/voltha-go/kafka"
 	"github.com/opencord/voltha-go/rw_core/config"
+	"github.com/opencord/voltha-lib-go/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	grpcserver "github.com/opencord/voltha-lib-go/pkg/grpc"
+	"github.com/opencord/voltha-lib-go/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/log"
+	"github.com/opencord/voltha-lib-go/pkg/probe"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
diff --git a/rw_core/core/device_agent.go b/rw_core/core/device_agent.go
index 23a6ab0..95e4f67 100755
--- a/rw_core/core/device_agent.go
+++ b/rw_core/core/device_agent.go
@@ -19,9 +19,9 @@
 	"context"
 	"fmt"
 	"github.com/gogo/protobuf/proto"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/model"
 	fu "github.com/opencord/voltha-go/rw_core/utils"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	ofp "github.com/opencord/voltha-protos/go/openflow_13"
 	"github.com/opencord/voltha-protos/go/voltha"
diff --git a/rw_core/core/device_manager.go b/rw_core/core/device_manager.go
index 1b0f586..bb36966 100755
--- a/rw_core/core/device_manager.go
+++ b/rw_core/core/device_manager.go
@@ -18,11 +18,11 @@
 import (
 	"context"
 	"errors"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/common/probe"
-	"github.com/opencord/voltha-go/db/model"
-	"github.com/opencord/voltha-go/kafka"
 	"github.com/opencord/voltha-go/rw_core/utils"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/log"
+	"github.com/opencord/voltha-lib-go/pkg/probe"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	ofp "github.com/opencord/voltha-protos/go/openflow_13"
 	"github.com/opencord/voltha-protos/go/voltha"
diff --git a/rw_core/core/device_ownership.go b/rw_core/core/device_ownership.go
index ade876b..53aaa5c 100644
--- a/rw_core/core/device_ownership.go
+++ b/rw_core/core/device_ownership.go
@@ -18,9 +18,9 @@
 import (
 	"context"
 	"fmt"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/kvstore"
 	"github.com/opencord/voltha-go/rw_core/utils"
+	"github.com/opencord/voltha-lib-go/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
diff --git a/rw_core/core/device_state_transitions.go b/rw_core/core/device_state_transitions.go
index 098a758..0c6fc7c 100644
--- a/rw_core/core/device_state_transitions.go
+++ b/rw_core/core/device_state_transitions.go
@@ -16,8 +16,8 @@
 package core
 
 import (
-	"github.com/opencord/voltha-go/common/log"
 	"github.com/opencord/voltha-go/rw_core/coreIf"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/voltha"
 )
 
diff --git a/rw_core/core/device_state_transitions_test.go b/rw_core/core/device_state_transitions_test.go
index dab88eb..b76b64a 100644
--- a/rw_core/core/device_state_transitions_test.go
+++ b/rw_core/core/device_state_transitions_test.go
@@ -16,8 +16,8 @@
 package core
 
 import (
-	"github.com/opencord/voltha-go/common/log"
 	"github.com/opencord/voltha-go/rw_core/coreIf"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"github.com/stretchr/testify/assert"
 	"reflect"
diff --git a/rw_core/core/grpc_nbi_api_handler.go b/rw_core/core/grpc_nbi_api_handler.go
index 482abfc..44520c4 100755
--- a/rw_core/core/grpc_nbi_api_handler.go
+++ b/rw_core/core/grpc_nbi_api_handler.go
@@ -20,8 +20,8 @@
 	"errors"
 	"github.com/golang/protobuf/ptypes/empty"
 	da "github.com/opencord/voltha-go/common/core/northbound/grpc"
-	"github.com/opencord/voltha-go/common/log"
 	"github.com/opencord/voltha-go/rw_core/utils"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/common"
 	"github.com/opencord/voltha-protos/go/omci"
 	"github.com/opencord/voltha-protos/go/openflow_13"
diff --git a/rw_core/core/logical_device_agent.go b/rw_core/core/logical_device_agent.go
index 9461c94..21f65fa 100644
--- a/rw_core/core/logical_device_agent.go
+++ b/rw_core/core/logical_device_agent.go
@@ -20,11 +20,11 @@
 	"errors"
 	"fmt"
 	"github.com/gogo/protobuf/proto"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/model"
 	fd "github.com/opencord/voltha-go/rw_core/flow_decomposition"
 	"github.com/opencord/voltha-go/rw_core/graph"
 	fu "github.com/opencord/voltha-go/rw_core/utils"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	ofp "github.com/opencord/voltha-protos/go/openflow_13"
 	"github.com/opencord/voltha-protos/go/voltha"
diff --git a/rw_core/core/logical_device_manager.go b/rw_core/core/logical_device_manager.go
index fa9713f..b3f313c 100644
--- a/rw_core/core/logical_device_manager.go
+++ b/rw_core/core/logical_device_manager.go
@@ -18,10 +18,10 @@
 import (
 	"context"
 	"errors"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/common/probe"
-	"github.com/opencord/voltha-go/db/model"
-	"github.com/opencord/voltha-go/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/db/model"
+	"github.com/opencord/voltha-lib-go/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/log"
+	"github.com/opencord/voltha-lib-go/pkg/probe"
 	"github.com/opencord/voltha-protos/go/openflow_13"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"google.golang.org/grpc/codes"
diff --git a/rw_core/core/transaction.go b/rw_core/core/transaction.go
index 1b4370d..b607973 100644
--- a/rw_core/core/transaction.go
+++ b/rw_core/core/transaction.go
@@ -29,8 +29,8 @@
 package core
 
 import (
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/kvstore"
+	"github.com/opencord/voltha-lib-go/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"time"
diff --git a/rw_core/flow_decomposition/flow_decomposer.go b/rw_core/flow_decomposition/flow_decomposer.go
index d4058bc..c3c66ca 100644
--- a/rw_core/flow_decomposition/flow_decomposer.go
+++ b/rw_core/flow_decomposition/flow_decomposer.go
@@ -18,10 +18,10 @@
 
 import (
 	"github.com/gogo/protobuf/proto"
-	"github.com/opencord/voltha-go/common/log"
 	"github.com/opencord/voltha-go/rw_core/coreIf"
 	"github.com/opencord/voltha-go/rw_core/graph"
 	fu "github.com/opencord/voltha-go/rw_core/utils"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ofp "github.com/opencord/voltha-protos/go/openflow_13"
 	"github.com/opencord/voltha-protos/go/voltha"
 )
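
These import rewrites are mechanical: the shared packages keep their names and APIs, and only the module path moves from voltha-go to voltha-lib-go/pkg. A minimal consumer after the change imports them as follows (a sketch; the referenced type names are assumed to be unchanged by the move):

    package example

    import (
        // was github.com/opencord/voltha-go/common/log
        "github.com/opencord/voltha-lib-go/pkg/log"
        // was github.com/opencord/voltha-go/db/kvstore
        "github.com/opencord/voltha-lib-go/pkg/db/kvstore"
    )

    // Blank declarations keep the imports used; the types themselves are untouched.
    var (
        _ log.Logger
        _ kvstore.KVPair
    )
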
diff --git a/rw_core/flow_decomposition/flow_decomposer_test.go b/rw_core/flow_decomposition/flow_decomposer_test.go
index 5e1dbe7..f122380 100644
--- a/rw_core/flow_decomposition/flow_decomposer_test.go
+++ b/rw_core/flow_decomposition/flow_decomposer_test.go
@@ -17,9 +17,9 @@
 
 import (
 	"errors"
-	"github.com/opencord/voltha-go/common/log"
 	"github.com/opencord/voltha-go/rw_core/graph"
 	fu "github.com/opencord/voltha-go/rw_core/utils"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ofp "github.com/opencord/voltha-protos/go/openflow_13"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"github.com/stretchr/testify/assert"
diff --git a/rw_core/graph/device_graph.go b/rw_core/graph/device_graph.go
index 357a709..5bfaf96 100644
--- a/rw_core/graph/device_graph.go
+++ b/rw_core/graph/device_graph.go
@@ -20,7 +20,7 @@
 	"errors"
 	"fmt"
 	"github.com/gyuho/goraph"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"strconv"
 	"strings"
diff --git a/rw_core/main.go b/rw_core/main.go
index e74d1bb..09c72f4 100644
--- a/rw_core/main.go
+++ b/rw_core/main.go
@@ -19,15 +19,15 @@
 	"context"
 	"errors"
 	"fmt"
-	grpcserver "github.com/opencord/voltha-go/common/grpc"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/common/probe"
-	"github.com/opencord/voltha-go/common/version"
-	"github.com/opencord/voltha-go/db/kvstore"
-	"github.com/opencord/voltha-go/kafka"
 	"github.com/opencord/voltha-go/rw_core/config"
 	c "github.com/opencord/voltha-go/rw_core/core"
 	"github.com/opencord/voltha-go/rw_core/utils"
+	"github.com/opencord/voltha-lib-go/pkg/db/kvstore"
+	grpcserver "github.com/opencord/voltha-lib-go/pkg/grpc"
+	"github.com/opencord/voltha-lib-go/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/log"
+	"github.com/opencord/voltha-lib-go/pkg/probe"
+	"github.com/opencord/voltha-lib-go/pkg/version"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	"os"
 	"os/signal"
diff --git a/rw_core/utils/core_utils_test.go b/rw_core/utils/core_utils_test.go
index cb0abfe..27d24a1 100644
--- a/rw_core/utils/core_utils_test.go
+++ b/rw_core/utils/core_utils_test.go
@@ -16,7 +16,7 @@
 package utils
 
 import (
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/stretchr/testify/assert"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
diff --git a/rw_core/utils/flow_utils.go b/rw_core/utils/flow_utils.go
index 4293126..7ead52d 100644
--- a/rw_core/utils/flow_utils.go
+++ b/rw_core/utils/flow_utils.go
@@ -21,7 +21,7 @@
 	"fmt"
 	"github.com/cevaris/ordered_map"
 	"github.com/gogo/protobuf/proto"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ofp "github.com/opencord/voltha-protos/go/openflow_13"
 	"math/big"
 	"strings"
diff --git a/rw_core/utils/flow_utils_test.go b/rw_core/utils/flow_utils_test.go
index c1b1da9..c51a778 100644
--- a/rw_core/utils/flow_utils_test.go
+++ b/rw_core/utils/flow_utils_test.go
@@ -16,7 +16,7 @@
 package utils
 
 import (
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ofp "github.com/opencord/voltha-protos/go/openflow_13"
 	"github.com/stretchr/testify/assert"
 	"google.golang.org/grpc/codes"
diff --git a/tests/core/api/grpc_nbi_api_handler_client_test.go b/tests/core/api/grpc_nbi_api_handler_client_test.go
index 4eb5d1c..be197e6 100644
--- a/tests/core/api/grpc_nbi_api_handler_client_test.go
+++ b/tests/core/api/grpc_nbi_api_handler_client_test.go
@@ -22,8 +22,8 @@
 	"context"
 	"fmt"
 	"github.com/golang/protobuf/ptypes/empty"
-	com "github.com/opencord/voltha-go/adapters/common"
 	"github.com/opencord/voltha-go/common/log"
+	com "github.com/opencord/voltha-lib-go/pkg/adapters/common"
 	"github.com/opencord/voltha-protos/go/common"
 	"github.com/opencord/voltha-protos/go/openflow_13"
 	"github.com/opencord/voltha-protos/go/voltha"
diff --git a/tests/core/concurrency/core_concurrency_test.go b/tests/core/concurrency/core_concurrency_test.go
index 5fbda49..d183a3a 100644
--- a/tests/core/concurrency/core_concurrency_test.go
+++ b/tests/core/concurrency/core_concurrency_test.go
@@ -23,8 +23,8 @@
 	"fmt"
 	"github.com/golang/protobuf/ptypes/empty"
 	"github.com/google/uuid"
-	com "github.com/opencord/voltha-go/adapters/common"
 	"github.com/opencord/voltha-go/common/log"
+	com "github.com/opencord/voltha-lib-go/pkg/adapters/common"
 	"github.com/opencord/voltha-protos/go/common"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"github.com/stretchr/testify/assert"
diff --git a/tests/utils/test_utils.go b/tests/utils/test_utils.go
index 52bdfbc..b140c60 100644
--- a/tests/utils/test_utils.go
+++ b/tests/utils/test_utils.go
@@ -26,7 +26,7 @@
 
 	"github.com/golang/protobuf/ptypes/empty"
 	"github.com/google/uuid"
-	com "github.com/opencord/voltha-go/adapters/common"
+	com "github.com/opencord/voltha-lib-go/pkg/adapters/common"
 	"github.com/opencord/voltha-protos/go/common"
 	ofp "github.com/opencord/voltha-protos/go/openflow_13"
 	"github.com/opencord/voltha-protos/go/voltha"
diff --git a/vendor/github.com/boljen/go-bitmap/LICENSE b/vendor/github.com/boljen/go-bitmap/LICENSE
deleted file mode 100644
index 13cc28c..0000000
--- a/vendor/github.com/boljen/go-bitmap/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Bol Christophe
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/boljen/go-bitmap/README.md b/vendor/github.com/boljen/go-bitmap/README.md
deleted file mode 100644
index 5ff5eba..0000000
--- a/vendor/github.com/boljen/go-bitmap/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Bitmap (Go)
-
-Package bitmap implements (thread-safe) bitmap functions and abstractions
-
-## Install
-
-    go get github.com/boljen/go-bitmap
-
-## Documentation
-
-See [godoc](https://godoc.org/github.com/boljen/go-bitmap)
-
-## Example
-
-    package main
-
-    import (
-        "fmt"
-        "github.com/boljen/go-bitmap"
-    )
-
-    func main() {
-        bm := bitmap.New(100)
-        bm.Set(0, true)
-        fmt.Println(bm.Get(0))
-    }
-
-## License
-
-MIT
diff --git a/vendor/github.com/boljen/go-bitmap/atomic.go b/vendor/github.com/boljen/go-bitmap/atomic.go
deleted file mode 100644
index f04d76e..0000000
--- a/vendor/github.com/boljen/go-bitmap/atomic.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package bitmap
-
-import (
-	"sync/atomic"
-	"unsafe"
-)
-
-var oobPanic = "SetAtomic not allowed on a bitmapSlice of cap() < 4"
-
-// SetAtomic is similar to Set except that it performs the operation atomically.
-func SetAtomic(bitmap []byte, targetBit int, targetValue bool) {
-	ov := (*[1]uint32)(unsafe.Pointer(&bitmap[targetBit/32]))[:]
-	SetAtomicUint32(ov, targetBit%32, targetValue)
-}
-
-// SetAtomic is similar to Set except that it performs the operation atomically.
-// It needs a bitmapSlice where the capacity is at least 4 bytes.
-func _SetAtomic(bitmapSlice []byte, targetBit int, targetValue bool) {
-	targetByteIndex := targetBit / 8
-	targetBitIndex := targetBit % 8
-	targetOffset := 0
-
-	// SetAtomic needs to modify 4 bytes of data so we panic when the slice
-	// doesn't have a capacity of at least 4 bytes.
-	if cap(bitmapSlice) < 4 {
-		panic(oobPanic)
-	}
-
-	// Calculate the Offset of the targetByte inside the 4-byte atomic batch.
-	// This is needed to ensure that atomic operations can happen as long as
-	// the bitmapSlice equals 4 bytes or more.
-	if cap(bitmapSlice) < targetByteIndex+3 {
-		targetOffset = cap(bitmapSlice) - targetByteIndex
-	}
-
-	// This gets a pointer to the memory of 4 bytes inside the bitmapSlice.
-	// It stores this pointer as an *uint32 so that it can be used to
-	// execute sync.atomic operations.
-	targetBytePointer := (*uint32)(unsafe.Pointer(&bitmapSlice[targetByteIndex-targetOffset]))
-
-	for {
-		// localValue is a copy of the uint32 value at *targetBytePointer.
-		// It's used to check whether the targetBit must be updated,
-		// and if so, to construct the new value for targetBytePointer.
-		localValue := atomic.LoadUint32(targetBytePointer)
-
-		// This "neutralizes" the uint32 conversion by getting a pointer to the
-		// 4-byte array stored underneath the uint32.
-		targetByteCopyPointer := (*[4]byte)(unsafe.Pointer(&localValue))
-
-		// Work is done when targetBit is already set to targetValue.
-		if GetBit(targetByteCopyPointer[targetOffset], targetBitIndex) == targetValue {
-			return
-		}
-
-		// Modify the targetBit and update memory so that the targetBit is the only bit
-		// that has been modified in the batch.
-		referenceValue := localValue
-		SetBitRef(&targetByteCopyPointer[targetOffset], targetBitIndex, targetValue)
-		if atomic.CompareAndSwapUint32(targetBytePointer, referenceValue, localValue) {
-			break
-		}
-	}
-}
diff --git a/vendor/github.com/boljen/go-bitmap/bitmap.go b/vendor/github.com/boljen/go-bitmap/bitmap.go
deleted file mode 100644
index dfe5cc2..0000000
--- a/vendor/github.com/boljen/go-bitmap/bitmap.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Package bitmap implements (thread-safe) bitmap functions and abstractions.
-//
-// Installation
-//
-// 	  go get github.com/boljen/go-bitmap
-package bitmap
-
-import "sync"
-
-var (
-	tA = [8]byte{1, 2, 4, 8, 16, 32, 64, 128}
-	tB = [8]byte{254, 253, 251, 247, 239, 223, 191, 127}
-)
-
-func dataOrCopy(d []byte, c bool) []byte {
-	if !c {
-		return d
-	}
-	ndata := make([]byte, len(d))
-	copy(ndata, d)
-	return ndata
-}
-
-// NewSlice creates a new byteslice with length l (in bits).
-// The actual size in bits might be up to 7 bits larger because
-// they are stored in a byteslice.
-func NewSlice(l int) []byte {
-	remainder := l % 8
-	if remainder != 0 {
-		remainder = 1
-	}
-	return make([]byte, l/8+remainder)
-}
-
-// Get returns the value of bit i from map m.
-// It doesn't check the bounds of the slice.
-func Get(m []byte, i int) bool {
-	return m[i/8]&tA[i%8] != 0
-}
-
-// Set sets bit i of map m to value v.
-// It doesn't check the bounds of the slice.
-func Set(m []byte, i int, v bool) {
-	index := i / 8
-	bit := i % 8
-	if v {
-		m[index] = m[index] | tA[bit]
-	} else {
-		m[index] = m[index] & tB[bit]
-	}
-}
-
-// GetBit returns the value of bit i of byte b.
-// The bit index must be between 0 and 7.
-func GetBit(b byte, i int) bool {
-	return b&tA[i] != 0
-}
-
-// SetBit sets bit i of byte b to value v.
-// The bit index must be between 0 and 7.
-func SetBit(b byte, i int, v bool) byte {
-	if v {
-		return b | tA[i]
-	}
-	return b & tB[i]
-}
-
-// SetBitRef sets bit i of byte *b to value v.
-func SetBitRef(b *byte, i int, v bool) {
-	if v {
-		*b = *b | tA[i]
-	} else {
-		*b = *b & tB[i]
-	}
-}
-
-// Len returns the length (in bits) of the provided byteslice.
-// It will always be a multiple of 8 bits.
-func Len(m []byte) int {
-	return len(m) * 8
-}
-
-// Bitmap is a byteslice with bitmap functions.
-// Creating one from existing data is as simple as bitmap := Bitmap(data).
-type Bitmap []byte
-
-// New creates a new Bitmap instance with length l (in bits).
-func New(l int) Bitmap {
-	return NewSlice(l)
-}
-
-// Len wraps around the Len function.
-func (b Bitmap) Len() int {
-	return Len(b)
-}
-
-// Get wraps around the Get function.
-func (b Bitmap) Get(i int) bool {
-	return Get(b, i)
-}
-
-// Set wraps around the Set function.
-func (b Bitmap) Set(i int, v bool) {
-	Set(b, i, v)
-}
-
-// Data returns the data of the bitmap.
-// If copy is false the actual underlying slice will be returned.
-func (b Bitmap) Data(copy bool) []byte {
-	return dataOrCopy(b, copy)
-}
-
-// Threadsafe implements thread-safe read- and write locking for the bitmap.
-type Threadsafe struct {
-	bm Bitmap
-	mu sync.RWMutex
-}
-
-// TSFromData creates a new Threadsafe using the provided data.
-// If copy is true the actual slice will be used.
-func TSFromData(data []byte, copy bool) *Threadsafe {
-	return &Threadsafe{
-		bm: Bitmap(dataOrCopy(data, copy)),
-	}
-}
-
-// NewTS creates a new Threadsafe instance.
-func NewTS(length int) *Threadsafe {
-	return &Threadsafe{
-		bm: New(length),
-	}
-}
-
-// Data returns the data of the bitmap.
-// If copy is false the actual underlying slice will be returned.
-func (b *Threadsafe) Data(copy bool) []byte {
-	b.mu.RLock()
-	data := dataOrCopy(b.bm, copy)
-	b.mu.RUnlock()
-	return data
-}
-
-// Len wraps around the Len function.
-func (b Threadsafe) Len() int {
-	b.mu.RLock()
-	l := b.bm.Len()
-	b.mu.RUnlock()
-	return l
-}
-
-// Get wraps around the Get function.
-func (b Threadsafe) Get(i int) bool {
-	b.mu.RLock()
-	v := b.bm.Get(i)
-	b.mu.RUnlock()
-	return v
-}
-
-// Set wraps around the Set function.
-func (b Threadsafe) Set(i int, v bool) {
-	b.mu.Lock()
-	b.bm.Set(i, v)
-	b.mu.Unlock()
-}
-
-// Concurrent is a bitmap implementation that achieves thread-safety
-// using atomic operations along with some unsafe.
-// It performs atomic operations on 32bits of data.
-type Concurrent []byte
-
-// NewConcurrent returns a concurrent bitmap.
-// It will create a bitmap with spare capacity so that atomic 32-bit operations stay in bounds.
-func NewConcurrent(l int) Concurrent {
-	remainder := l % 8
-	if remainder != 0 {
-		remainder = 1
-	}
-	return make([]byte, l/8+remainder, l/8+remainder+3)
-}
-
-// Get wraps around the Get function.
-func (c Concurrent) Get(b int) bool {
-	return Get(c, b)
-}
-
-// Set wraps around the SetAtomic function.
-func (c Concurrent) Set(b int, v bool) {
-	SetAtomic(c, b, v)
-}
-
-// Len wraps around the Len function.
-func (c Concurrent) Len() int {
-	return Len(c)
-}
-
-// Data returns the data of the bitmap.
-// If copy is false the actual underlying slice will be returned.
-func (c Concurrent) Data(copy bool) []byte {
-	return dataOrCopy(c, copy)
-}
diff --git a/vendor/github.com/boljen/go-bitmap/uintmap.go b/vendor/github.com/boljen/go-bitmap/uintmap.go
deleted file mode 100644
index 72cbf4a..0000000
--- a/vendor/github.com/boljen/go-bitmap/uintmap.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package bitmap
-
-import (
-	"sync/atomic"
-	"unsafe"
-)
-
-// SetAtomicUint32 sets the target bit to the target value inside the uint32
-// encoded bitmap.
-func SetAtomicUint32(bitmap []uint32, targetBit int, targetValue bool) {
-	targetIndex := targetBit / 32
-	BitOffset := targetBit % 32
-
-	for {
-		localValue := atomic.LoadUint32(&bitmap[targetIndex])
-		targetBytes := (*[4]byte)(unsafe.Pointer(&localValue))[:]
-		if Get(targetBytes, BitOffset) == targetValue {
-			return
-		}
-		referenceValue := localValue
-		Set(targetBytes, BitOffset, targetValue)
-		if atomic.CompareAndSwapUint32(&bitmap[targetIndex], referenceValue, localValue) {
-			break
-		}
-	}
-}
-
-// GetAtomicUint32 gets the target bit from an uint32 encoded bitmap.
-func GetAtomicUint32(bitmap []uint32, targetBit int) bool {
-	data := (*[4]byte)(unsafe.Pointer(&bitmap[targetBit/32]))[:]
-	return Get(data, targetBit%32)
-}
diff --git a/adapters/README.md b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/README.md
similarity index 100%
rename from adapters/README.md
rename to vendor/github.com/opencord/voltha-lib-go/pkg/adapters/README.md
diff --git a/adapters/adapterif/adapter_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/adapterif/adapter_proxy_if.go
similarity index 100%
rename from adapters/adapterif/adapter_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/adapters/adapterif/adapter_proxy_if.go
diff --git a/adapters/adapterif/core_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/adapterif/core_proxy_if.go
similarity index 100%
rename from adapters/adapterif/core_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/adapters/adapterif/core_proxy_if.go
diff --git a/adapters/adapterif/events_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/adapterif/events_proxy_if.go
similarity index 100%
rename from adapters/adapterif/events_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/adapters/adapterif/events_proxy_if.go
diff --git a/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/adapter_proxy.go
similarity index 96%
rename from adapters/common/adapter_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/adapter_proxy.go
index 6c32422..fee70c8 100644
--- a/adapters/common/adapter_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/adapter_proxy.go
@@ -21,8 +21,8 @@
 	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/any"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	"time"
 )
diff --git a/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/core_proxy.go
similarity index 99%
rename from adapters/common/core_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/core_proxy.go
index c9f332c..30117ca 100644
--- a/adapters/common/core_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/core_proxy.go
@@ -19,8 +19,8 @@
 	"context"
 	"github.com/golang/protobuf/ptypes"
 	a "github.com/golang/protobuf/ptypes/any"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	"github.com/opencord/voltha-protos/go/voltha"
 	"google.golang.org/grpc/codes"
diff --git a/adapters/common/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/events_proxy.go
similarity index 95%
rename from adapters/common/events_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/events_proxy.go
index 34fcde7..25b1be4 100644
--- a/adapters/common/events_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/events_proxy.go
@@ -23,9 +23,9 @@
 	"strings"
 	"time"
 
-	"github.com/opencord/voltha-go/adapters/adapterif"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/voltha"
 )
 
diff --git a/adapters/common/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/performance_metrics.go
similarity index 100%
rename from adapters/common/performance_metrics.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/performance_metrics.go
diff --git a/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/request_handler.go
similarity index 98%
rename from adapters/common/request_handler.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/request_handler.go
index 55e04d7..27f9846 100644
--- a/adapters/common/request_handler.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/request_handler.go
@@ -19,10 +19,10 @@
 	"errors"
 	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/empty"
-	"github.com/opencord/voltha-go/adapters"
-	"github.com/opencord/voltha-go/adapters/adapterif"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/adapters"
+	"github.com/opencord/voltha-lib-go/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	"github.com/opencord/voltha-protos/go/openflow_13"
 	"github.com/opencord/voltha-protos/go/voltha"
diff --git a/adapters/common/utils.go b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/utils.go
similarity index 100%
rename from adapters/common/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/adapters/common/utils.go
diff --git a/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-lib-go/pkg/adapters/iAdapter.go
similarity index 100%
rename from adapters/iAdapter.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/adapters/iAdapter.go
diff --git a/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/kvstore/client.go
similarity index 97%
rename from db/kvstore/client.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/kvstore/client.go
index 67c9219..f40d10e 100644
--- a/db/kvstore/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/kvstore/client.go
@@ -16,7 +16,7 @@
 package kvstore
 
 import (
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 )
 
 const (
diff --git a/db/kvstore/consulclient.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/kvstore/consulclient.go
similarity index 99%
rename from db/kvstore/consulclient.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/kvstore/consulclient.go
index c4fa0af..c3e3999 100644
--- a/db/kvstore/consulclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/kvstore/consulclient.go
@@ -19,7 +19,7 @@
 	"bytes"
 	"context"
 	"errors"
-	log "github.com/opencord/voltha-go/common/log"
+	log "github.com/opencord/voltha-lib-go/pkg/log"
 	"sync"
 	"time"
 	//log "ciena.com/coordinator/common"
diff --git a/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/kvstore/etcdclient.go
similarity index 99%
rename from db/kvstore/etcdclient.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/kvstore/etcdclient.go
index 3af1ef2..beac4e0 100644
--- a/db/kvstore/etcdclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/kvstore/etcdclient.go
@@ -19,7 +19,7 @@
 	"context"
 	"errors"
 	"fmt"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	v3Client "go.etcd.io/etcd/clientv3"
 	v3Concurrency "go.etcd.io/etcd/clientv3/concurrency"
 	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
diff --git a/db/kvstore/kvutils.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/kvstore/kvutils.go
similarity index 100%
rename from db/kvstore/kvutils.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/kvstore/kvutils.go
diff --git a/db/model/backend.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/backend.go
similarity index 97%
rename from db/model/backend.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/backend.go
index 981a1d5..fb2c813 100644
--- a/db/model/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/backend.go
@@ -19,8 +19,8 @@
 import (
 	"errors"
 	"fmt"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/kvstore"
+	"github.com/opencord/voltha-lib-go/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"strconv"
 	"sync"
 	"time"
diff --git a/db/model/branch.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/branch.go
similarity index 98%
rename from db/model/branch.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/branch.go
index 3389291..2075b7e 100644
--- a/db/model/branch.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/branch.go
@@ -17,7 +17,7 @@
 package model
 
 import (
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"sync"
 )
 
diff --git a/db/model/callback_type.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/callback_type.go
similarity index 100%
rename from db/model/callback_type.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/callback_type.go
diff --git a/db/model/child_type.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/child_type.go
similarity index 98%
rename from db/model/child_type.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/child_type.go
index 250de9c..fae9844 100644
--- a/db/model/child_type.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/child_type.go
@@ -20,7 +20,7 @@
 	desc "github.com/golang/protobuf/descriptor"
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/protoc-gen-go/descriptor"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/common"
 	"reflect"
 	"strconv"
diff --git a/db/model/data_revision.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/data_revision.go
similarity index 97%
rename from db/model/data_revision.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/data_revision.go
index 0763d09..b930ae1 100644
--- a/db/model/data_revision.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/data_revision.go
@@ -22,7 +22,7 @@
 	"encoding/json"
 	"fmt"
 	"github.com/golang/protobuf/proto"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"reflect"
 )
 
diff --git a/db/model/event_bus.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/event_bus.go
similarity index 97%
rename from db/model/event_bus.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/event_bus.go
index 335d43f..3af2556 100644
--- a/db/model/event_bus.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/event_bus.go
@@ -19,7 +19,7 @@
 import (
 	"encoding/json"
 	"github.com/golang/protobuf/proto"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/voltha"
 )
 
diff --git a/db/model/event_bus_client.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/event_bus_client.go
similarity index 95%
rename from db/model/event_bus_client.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/event_bus_client.go
index c9c1314..fdc02f9 100644
--- a/db/model/event_bus_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/event_bus_client.go
@@ -17,7 +17,7 @@
 package model
 
 import (
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"github.com/opencord/voltha-protos/go/voltha"
 )
 
diff --git a/db/model/merge.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/merge.go
similarity index 98%
rename from db/model/merge.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/merge.go
index c59dda4..752d025 100644
--- a/db/model/merge.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/merge.go
@@ -17,7 +17,7 @@
 package model
 
 import (
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 )
 
 func revisionsAreEqual(a, b []Revision) bool {
diff --git a/db/model/model.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/model.go
similarity index 95%
rename from db/model/model.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/model.go
index 3446303..8087919 100644
--- a/db/model/model.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/model.go
@@ -16,7 +16,7 @@
 package model
 
 import (
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 )
 
 func init() {
diff --git a/db/model/node.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/node.go
similarity index 99%
rename from db/model/node.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/node.go
index c9815fa..7fd0250 100644
--- a/db/model/node.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/node.go
@@ -23,7 +23,7 @@
 	"context"
 	"fmt"
 	"github.com/golang/protobuf/proto"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"reflect"
 	"strings"
 	"sync"
diff --git a/db/model/non_persisted_revision.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/non_persisted_revision.go
similarity index 98%
rename from db/model/non_persisted_revision.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/non_persisted_revision.go
index 6900c5d..88320cb 100644
--- a/db/model/non_persisted_revision.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/non_persisted_revision.go
@@ -21,8 +21,8 @@
 	"crypto/md5"
 	"fmt"
 	"github.com/golang/protobuf/proto"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/kvstore"
+	"github.com/opencord/voltha-lib-go/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"reflect"
 	"sort"
 	"sync"
diff --git a/db/model/persisted_revision.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/persisted_revision.go
similarity index 99%
rename from db/model/persisted_revision.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/persisted_revision.go
index d2d228f..c644e14 100644
--- a/db/model/persisted_revision.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/persisted_revision.go
@@ -22,8 +22,8 @@
 	"context"
 	"github.com/golang/protobuf/proto"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-go/common/log"
-	"github.com/opencord/voltha-go/db/kvstore"
+	"github.com/opencord/voltha-lib-go/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"reflect"
 	"strings"
 	"sync"
diff --git a/db/model/profiling.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/profiling.go
similarity index 98%
rename from db/model/profiling.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/profiling.go
index 874b035..c50c9f6 100644
--- a/db/model/profiling.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/profiling.go
@@ -17,7 +17,7 @@
 package model
 
 import (
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"sync"
 )
 
diff --git a/db/model/proxy.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/proxy.go
similarity index 99%
rename from db/model/proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/proxy.go
index 5c4d772..2d2c24e 100644
--- a/db/model/proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/proxy.go
@@ -22,7 +22,7 @@
 	"errors"
 	"fmt"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"reflect"
 	"runtime"
 	"strings"
diff --git a/db/model/revision.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/revision.go
similarity index 96%
rename from db/model/revision.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/revision.go
index 6f52248..bb61355 100644
--- a/db/model/revision.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/revision.go
@@ -17,7 +17,7 @@
 
 import (
 	"context"
-	"github.com/opencord/voltha-go/db/kvstore"
+	"github.com/opencord/voltha-lib-go/pkg/db/kvstore"
 	"time"
 )
 
diff --git a/db/model/root.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/root.go
similarity index 99%
rename from db/model/root.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/root.go
index 8331e11..20c3721 100644
--- a/db/model/root.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/root.go
@@ -22,7 +22,7 @@
 	"encoding/json"
 	"github.com/golang/protobuf/proto"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"reflect"
 	"sync"
 )
diff --git a/db/model/transaction.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/transaction.go
similarity index 97%
rename from db/model/transaction.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/transaction.go
index 7529ff2..5bef77e 100644
--- a/db/model/transaction.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/transaction.go
@@ -17,7 +17,7 @@
 
 import (
 	"context"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 )
 
 type Transaction struct {
diff --git a/db/model/utils.go b/vendor/github.com/opencord/voltha-lib-go/pkg/db/model/utils.go
similarity index 100%
rename from db/model/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/db/model/utils.go
diff --git a/common/grpc/security.go b/vendor/github.com/opencord/voltha-lib-go/pkg/grpc/security.go
similarity index 100%
rename from common/grpc/security.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/grpc/security.go
diff --git a/common/grpc/server.go b/vendor/github.com/opencord/voltha-lib-go/pkg/grpc/server.go
similarity index 97%
rename from common/grpc/server.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/grpc/server.go
index e3f3d99..d2438e0 100644
--- a/common/grpc/server.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/grpc/server.go
@@ -18,7 +18,7 @@
 import (
 	"context"
 	"fmt"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
 	"net"
diff --git a/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/pkg/kafka/client.go
old mode 100755
new mode 100644
similarity index 100%
rename from kafka/client.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/kafka/client.go
diff --git a/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-lib-go/pkg/kafka/kafka_inter_container_library.go
similarity index 99%
rename from kafka/kafka_inter_container_library.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/kafka/kafka_inter_container_library.go
index f9b3319..d2e0702 100644
--- a/kafka/kafka_inter_container_library.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/kafka/kafka_inter_container_library.go
@@ -23,7 +23,7 @@
 	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/any"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	"reflect"
 	"strings"
diff --git a/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/pkg/kafka/sarama_client.go
old mode 100755
new mode 100644
similarity index 99%
rename from kafka/sarama_client.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/kafka/sarama_client.go
index 9e3ce0c..cec18ec
--- a/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/kafka/sarama_client.go
@@ -22,7 +22,7 @@
 	scc "github.com/bsm/sarama-cluster"
 	"github.com/golang/protobuf/proto"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	ic "github.com/opencord/voltha-protos/go/inter_container"
 	"strings"
 	"sync"
diff --git a/kafka/utils.go b/vendor/github.com/opencord/voltha-lib-go/pkg/kafka/utils.go
similarity index 100%
rename from kafka/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/kafka/utils.go
diff --git a/common/log/log.go b/vendor/github.com/opencord/voltha-lib-go/pkg/log/log.go
similarity index 100%
rename from common/log/log.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/log/log.go
diff --git a/common/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/pkg/probe/probe.go
similarity index 98%
rename from common/probe/probe.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/probe/probe.go
index 8a8e485..192c7ca 100644
--- a/common/probe/probe.go
+++ b/vendor/github.com/opencord/voltha-lib-go/pkg/probe/probe.go
@@ -18,7 +18,7 @@
 import (
 	"context"
 	"fmt"
-	"github.com/opencord/voltha-go/common/log"
+	"github.com/opencord/voltha-lib-go/pkg/log"
 	"net/http"
 	"sync"
 )
diff --git a/common/version/version.go b/vendor/github.com/opencord/voltha-lib-go/pkg/version/version.go
similarity index 100%
rename from common/version/version.go
rename to vendor/github.com/opencord/voltha-lib-go/pkg/version/version.go
diff --git a/vendor/github.com/opencord/voltha-protos/go/tech_profile/tech_profile.pb.go b/vendor/github.com/opencord/voltha-protos/go/tech_profile/tech_profile.pb.go
deleted file mode 100644
index 4c6d4b3..0000000
--- a/vendor/github.com/opencord/voltha-protos/go/tech_profile/tech_profile.pb.go
+++ /dev/null
@@ -1,971 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: voltha_protos/tech_profile.proto
-
-package tech_profile
-
-import (
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	_ "google.golang.org/genproto/googleapis/api/annotations"
-	math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-type Direction int32
-
-const (
-	Direction_UPSTREAM      Direction = 0
-	Direction_DOWNSTREAM    Direction = 1
-	Direction_BIDIRECTIONAL Direction = 2
-)
-
-var Direction_name = map[int32]string{
-	0: "UPSTREAM",
-	1: "DOWNSTREAM",
-	2: "BIDIRECTIONAL",
-}
-
-var Direction_value = map[string]int32{
-	"UPSTREAM":      0,
-	"DOWNSTREAM":    1,
-	"BIDIRECTIONAL": 2,
-}
-
-func (x Direction) String() string {
-	return proto.EnumName(Direction_name, int32(x))
-}
-
-func (Direction) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{0}
-}
-
-type SchedulingPolicy int32
-
-const (
-	SchedulingPolicy_WRR            SchedulingPolicy = 0
-	SchedulingPolicy_StrictPriority SchedulingPolicy = 1
-	SchedulingPolicy_Hybrid         SchedulingPolicy = 2
-)
-
-var SchedulingPolicy_name = map[int32]string{
-	0: "WRR",
-	1: "StrictPriority",
-	2: "Hybrid",
-}
-
-var SchedulingPolicy_value = map[string]int32{
-	"WRR":            0,
-	"StrictPriority": 1,
-	"Hybrid":         2,
-}
-
-func (x SchedulingPolicy) String() string {
-	return proto.EnumName(SchedulingPolicy_name, int32(x))
-}
-
-func (SchedulingPolicy) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{1}
-}
-
-type AdditionalBW int32
-
-const (
-	AdditionalBW_AdditionalBW_None       AdditionalBW = 0
-	AdditionalBW_AdditionalBW_NA         AdditionalBW = 1
-	AdditionalBW_AdditionalBW_BestEffort AdditionalBW = 2
-	AdditionalBW_AdditionalBW_Auto       AdditionalBW = 3
-)
-
-var AdditionalBW_name = map[int32]string{
-	0: "AdditionalBW_None",
-	1: "AdditionalBW_NA",
-	2: "AdditionalBW_BestEffort",
-	3: "AdditionalBW_Auto",
-}
-
-var AdditionalBW_value = map[string]int32{
-	"AdditionalBW_None":       0,
-	"AdditionalBW_NA":         1,
-	"AdditionalBW_BestEffort": 2,
-	"AdditionalBW_Auto":       3,
-}
-
-func (x AdditionalBW) String() string {
-	return proto.EnumName(AdditionalBW_name, int32(x))
-}
-
-func (AdditionalBW) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{2}
-}
-
-type DiscardPolicy int32
-
-const (
-	DiscardPolicy_TailDrop  DiscardPolicy = 0
-	DiscardPolicy_WTailDrop DiscardPolicy = 1
-	DiscardPolicy_Red       DiscardPolicy = 2
-	DiscardPolicy_WRed      DiscardPolicy = 3
-)
-
-var DiscardPolicy_name = map[int32]string{
-	0: "TailDrop",
-	1: "WTailDrop",
-	2: "Red",
-	3: "WRed",
-}
-
-var DiscardPolicy_value = map[string]int32{
-	"TailDrop":  0,
-	"WTailDrop": 1,
-	"Red":       2,
-	"WRed":      3,
-}
-
-func (x DiscardPolicy) String() string {
-	return proto.EnumName(DiscardPolicy_name, int32(x))
-}
-
-func (DiscardPolicy) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{3}
-}
-
-type InferredAdditionBWIndication int32
-
-const (
-	InferredAdditionBWIndication_InferredAdditionBWIndication_None       InferredAdditionBWIndication = 0
-	InferredAdditionBWIndication_InferredAdditionBWIndication_Assured    InferredAdditionBWIndication = 1
-	InferredAdditionBWIndication_InferredAdditionBWIndication_BestEffort InferredAdditionBWIndication = 2
-)
-
-var InferredAdditionBWIndication_name = map[int32]string{
-	0: "InferredAdditionBWIndication_None",
-	1: "InferredAdditionBWIndication_Assured",
-	2: "InferredAdditionBWIndication_BestEffort",
-}
-
-var InferredAdditionBWIndication_value = map[string]int32{
-	"InferredAdditionBWIndication_None":       0,
-	"InferredAdditionBWIndication_Assured":    1,
-	"InferredAdditionBWIndication_BestEffort": 2,
-}
-
-func (x InferredAdditionBWIndication) String() string {
-	return proto.EnumName(InferredAdditionBWIndication_name, int32(x))
-}
-
-func (InferredAdditionBWIndication) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{4}
-}
-
-type SchedulerConfig struct {
-	Direction            Direction        `protobuf:"varint,1,opt,name=direction,proto3,enum=tech_profile.Direction" json:"direction,omitempty"`
-	AdditionalBw         AdditionalBW     `protobuf:"varint,2,opt,name=additional_bw,json=additionalBw,proto3,enum=tech_profile.AdditionalBW" json:"additional_bw,omitempty"`
-	Priority             uint32           `protobuf:"fixed32,3,opt,name=priority,proto3" json:"priority,omitempty"`
-	Weight               uint32           `protobuf:"fixed32,4,opt,name=weight,proto3" json:"weight,omitempty"`
-	SchedPolicy          SchedulingPolicy `protobuf:"varint,5,opt,name=sched_policy,json=schedPolicy,proto3,enum=tech_profile.SchedulingPolicy" json:"sched_policy,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
-	XXX_unrecognized     []byte           `json:"-"`
-	XXX_sizecache        int32            `json:"-"`
-}
-
-func (m *SchedulerConfig) Reset()         { *m = SchedulerConfig{} }
-func (m *SchedulerConfig) String() string { return proto.CompactTextString(m) }
-func (*SchedulerConfig) ProtoMessage()    {}
-func (*SchedulerConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{0}
-}
-
-func (m *SchedulerConfig) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_SchedulerConfig.Unmarshal(m, b)
-}
-func (m *SchedulerConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_SchedulerConfig.Marshal(b, m, deterministic)
-}
-func (m *SchedulerConfig) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SchedulerConfig.Merge(m, src)
-}
-func (m *SchedulerConfig) XXX_Size() int {
-	return xxx_messageInfo_SchedulerConfig.Size(m)
-}
-func (m *SchedulerConfig) XXX_DiscardUnknown() {
-	xxx_messageInfo_SchedulerConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SchedulerConfig proto.InternalMessageInfo
-
-func (m *SchedulerConfig) GetDirection() Direction {
-	if m != nil {
-		return m.Direction
-	}
-	return Direction_UPSTREAM
-}
-
-func (m *SchedulerConfig) GetAdditionalBw() AdditionalBW {
-	if m != nil {
-		return m.AdditionalBw
-	}
-	return AdditionalBW_AdditionalBW_None
-}
-
-func (m *SchedulerConfig) GetPriority() uint32 {
-	if m != nil {
-		return m.Priority
-	}
-	return 0
-}
-
-func (m *SchedulerConfig) GetWeight() uint32 {
-	if m != nil {
-		return m.Weight
-	}
-	return 0
-}
-
-func (m *SchedulerConfig) GetSchedPolicy() SchedulingPolicy {
-	if m != nil {
-		return m.SchedPolicy
-	}
-	return SchedulingPolicy_WRR
-}
-
-type TrafficShapingInfo struct {
-	Cir                  uint32                       `protobuf:"fixed32,1,opt,name=cir,proto3" json:"cir,omitempty"`
-	Cbs                  uint32                       `protobuf:"fixed32,2,opt,name=cbs,proto3" json:"cbs,omitempty"`
-	Pir                  uint32                       `protobuf:"fixed32,3,opt,name=pir,proto3" json:"pir,omitempty"`
-	Pbs                  uint32                       `protobuf:"fixed32,4,opt,name=pbs,proto3" json:"pbs,omitempty"`
-	Gir                  uint32                       `protobuf:"fixed32,5,opt,name=gir,proto3" json:"gir,omitempty"`
-	AddBwInd             InferredAdditionBWIndication `protobuf:"varint,6,opt,name=add_bw_ind,json=addBwInd,proto3,enum=tech_profile.InferredAdditionBWIndication" json:"add_bw_ind,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
-	XXX_unrecognized     []byte                       `json:"-"`
-	XXX_sizecache        int32                        `json:"-"`
-}
-
-func (m *TrafficShapingInfo) Reset()         { *m = TrafficShapingInfo{} }
-func (m *TrafficShapingInfo) String() string { return proto.CompactTextString(m) }
-func (*TrafficShapingInfo) ProtoMessage()    {}
-func (*TrafficShapingInfo) Descriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{1}
-}
-
-func (m *TrafficShapingInfo) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_TrafficShapingInfo.Unmarshal(m, b)
-}
-func (m *TrafficShapingInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_TrafficShapingInfo.Marshal(b, m, deterministic)
-}
-func (m *TrafficShapingInfo) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TrafficShapingInfo.Merge(m, src)
-}
-func (m *TrafficShapingInfo) XXX_Size() int {
-	return xxx_messageInfo_TrafficShapingInfo.Size(m)
-}
-func (m *TrafficShapingInfo) XXX_DiscardUnknown() {
-	xxx_messageInfo_TrafficShapingInfo.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TrafficShapingInfo proto.InternalMessageInfo
-
-func (m *TrafficShapingInfo) GetCir() uint32 {
-	if m != nil {
-		return m.Cir
-	}
-	return 0
-}
-
-func (m *TrafficShapingInfo) GetCbs() uint32 {
-	if m != nil {
-		return m.Cbs
-	}
-	return 0
-}
-
-func (m *TrafficShapingInfo) GetPir() uint32 {
-	if m != nil {
-		return m.Pir
-	}
-	return 0
-}
-
-func (m *TrafficShapingInfo) GetPbs() uint32 {
-	if m != nil {
-		return m.Pbs
-	}
-	return 0
-}
-
-func (m *TrafficShapingInfo) GetGir() uint32 {
-	if m != nil {
-		return m.Gir
-	}
-	return 0
-}
-
-func (m *TrafficShapingInfo) GetAddBwInd() InferredAdditionBWIndication {
-	if m != nil {
-		return m.AddBwInd
-	}
-	return InferredAdditionBWIndication_InferredAdditionBWIndication_None
-}
-
-type TrafficScheduler struct {
-	Direction            Direction           `protobuf:"varint,1,opt,name=direction,proto3,enum=tech_profile.Direction" json:"direction,omitempty"`
-	AllocId              uint32              `protobuf:"fixed32,2,opt,name=alloc_id,json=allocId,proto3" json:"alloc_id,omitempty"`
-	Scheduler            *SchedulerConfig    `protobuf:"bytes,3,opt,name=scheduler,proto3" json:"scheduler,omitempty"`
-	TrafficShapingInfo   *TrafficShapingInfo `protobuf:"bytes,4,opt,name=traffic_shaping_info,json=trafficShapingInfo,proto3" json:"traffic_shaping_info,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
-	XXX_unrecognized     []byte              `json:"-"`
-	XXX_sizecache        int32               `json:"-"`
-}
-
-func (m *TrafficScheduler) Reset()         { *m = TrafficScheduler{} }
-func (m *TrafficScheduler) String() string { return proto.CompactTextString(m) }
-func (*TrafficScheduler) ProtoMessage()    {}
-func (*TrafficScheduler) Descriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{2}
-}
-
-func (m *TrafficScheduler) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_TrafficScheduler.Unmarshal(m, b)
-}
-func (m *TrafficScheduler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_TrafficScheduler.Marshal(b, m, deterministic)
-}
-func (m *TrafficScheduler) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TrafficScheduler.Merge(m, src)
-}
-func (m *TrafficScheduler) XXX_Size() int {
-	return xxx_messageInfo_TrafficScheduler.Size(m)
-}
-func (m *TrafficScheduler) XXX_DiscardUnknown() {
-	xxx_messageInfo_TrafficScheduler.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TrafficScheduler proto.InternalMessageInfo
-
-func (m *TrafficScheduler) GetDirection() Direction {
-	if m != nil {
-		return m.Direction
-	}
-	return Direction_UPSTREAM
-}
-
-func (m *TrafficScheduler) GetAllocId() uint32 {
-	if m != nil {
-		return m.AllocId
-	}
-	return 0
-}
-
-func (m *TrafficScheduler) GetScheduler() *SchedulerConfig {
-	if m != nil {
-		return m.Scheduler
-	}
-	return nil
-}
-
-func (m *TrafficScheduler) GetTrafficShapingInfo() *TrafficShapingInfo {
-	if m != nil {
-		return m.TrafficShapingInfo
-	}
-	return nil
-}
-
-type TrafficSchedulers struct {
-	IntfId               uint32              `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
-	OnuId                uint32              `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
-	UniId                uint32              `protobuf:"fixed32,4,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
-	PortNo               uint32              `protobuf:"fixed32,5,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
-	TrafficScheds        []*TrafficScheduler `protobuf:"bytes,3,rep,name=traffic_scheds,json=trafficScheds,proto3" json:"traffic_scheds,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
-	XXX_unrecognized     []byte              `json:"-"`
-	XXX_sizecache        int32               `json:"-"`
-}
-
-func (m *TrafficSchedulers) Reset()         { *m = TrafficSchedulers{} }
-func (m *TrafficSchedulers) String() string { return proto.CompactTextString(m) }
-func (*TrafficSchedulers) ProtoMessage()    {}
-func (*TrafficSchedulers) Descriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{3}
-}
-
-func (m *TrafficSchedulers) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_TrafficSchedulers.Unmarshal(m, b)
-}
-func (m *TrafficSchedulers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_TrafficSchedulers.Marshal(b, m, deterministic)
-}
-func (m *TrafficSchedulers) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TrafficSchedulers.Merge(m, src)
-}
-func (m *TrafficSchedulers) XXX_Size() int {
-	return xxx_messageInfo_TrafficSchedulers.Size(m)
-}
-func (m *TrafficSchedulers) XXX_DiscardUnknown() {
-	xxx_messageInfo_TrafficSchedulers.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TrafficSchedulers proto.InternalMessageInfo
-
-func (m *TrafficSchedulers) GetIntfId() uint32 {
-	if m != nil {
-		return m.IntfId
-	}
-	return 0
-}
-
-func (m *TrafficSchedulers) GetOnuId() uint32 {
-	if m != nil {
-		return m.OnuId
-	}
-	return 0
-}
-
-func (m *TrafficSchedulers) GetUniId() uint32 {
-	if m != nil {
-		return m.UniId
-	}
-	return 0
-}
-
-func (m *TrafficSchedulers) GetPortNo() uint32 {
-	if m != nil {
-		return m.PortNo
-	}
-	return 0
-}
-
-func (m *TrafficSchedulers) GetTrafficScheds() []*TrafficScheduler {
-	if m != nil {
-		return m.TrafficScheds
-	}
-	return nil
-}
-
-type TailDropDiscardConfig struct {
-	QueueSize            uint32   `protobuf:"fixed32,1,opt,name=queue_size,json=queueSize,proto3" json:"queue_size,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *TailDropDiscardConfig) Reset()         { *m = TailDropDiscardConfig{} }
-func (m *TailDropDiscardConfig) String() string { return proto.CompactTextString(m) }
-func (*TailDropDiscardConfig) ProtoMessage()    {}
-func (*TailDropDiscardConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{4}
-}
-
-func (m *TailDropDiscardConfig) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_TailDropDiscardConfig.Unmarshal(m, b)
-}
-func (m *TailDropDiscardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_TailDropDiscardConfig.Marshal(b, m, deterministic)
-}
-func (m *TailDropDiscardConfig) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TailDropDiscardConfig.Merge(m, src)
-}
-func (m *TailDropDiscardConfig) XXX_Size() int {
-	return xxx_messageInfo_TailDropDiscardConfig.Size(m)
-}
-func (m *TailDropDiscardConfig) XXX_DiscardUnknown() {
-	xxx_messageInfo_TailDropDiscardConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TailDropDiscardConfig proto.InternalMessageInfo
-
-func (m *TailDropDiscardConfig) GetQueueSize() uint32 {
-	if m != nil {
-		return m.QueueSize
-	}
-	return 0
-}
-
-type RedDiscardConfig struct {
-	MinThreshold         uint32   `protobuf:"fixed32,1,opt,name=min_threshold,json=minThreshold,proto3" json:"min_threshold,omitempty"`
-	MaxThreshold         uint32   `protobuf:"fixed32,2,opt,name=max_threshold,json=maxThreshold,proto3" json:"max_threshold,omitempty"`
-	MaxProbability       uint32   `protobuf:"fixed32,3,opt,name=max_probability,json=maxProbability,proto3" json:"max_probability,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *RedDiscardConfig) Reset()         { *m = RedDiscardConfig{} }
-func (m *RedDiscardConfig) String() string { return proto.CompactTextString(m) }
-func (*RedDiscardConfig) ProtoMessage()    {}
-func (*RedDiscardConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{5}
-}
-
-func (m *RedDiscardConfig) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_RedDiscardConfig.Unmarshal(m, b)
-}
-func (m *RedDiscardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_RedDiscardConfig.Marshal(b, m, deterministic)
-}
-func (m *RedDiscardConfig) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RedDiscardConfig.Merge(m, src)
-}
-func (m *RedDiscardConfig) XXX_Size() int {
-	return xxx_messageInfo_RedDiscardConfig.Size(m)
-}
-func (m *RedDiscardConfig) XXX_DiscardUnknown() {
-	xxx_messageInfo_RedDiscardConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RedDiscardConfig proto.InternalMessageInfo
-
-func (m *RedDiscardConfig) GetMinThreshold() uint32 {
-	if m != nil {
-		return m.MinThreshold
-	}
-	return 0
-}
-
-func (m *RedDiscardConfig) GetMaxThreshold() uint32 {
-	if m != nil {
-		return m.MaxThreshold
-	}
-	return 0
-}
-
-func (m *RedDiscardConfig) GetMaxProbability() uint32 {
-	if m != nil {
-		return m.MaxProbability
-	}
-	return 0
-}
-
-type WRedDiscardConfig struct {
-	Green                *RedDiscardConfig `protobuf:"bytes,1,opt,name=green,proto3" json:"green,omitempty"`
-	Yellow               *RedDiscardConfig `protobuf:"bytes,2,opt,name=yellow,proto3" json:"yellow,omitempty"`
-	Red                  *RedDiscardConfig `protobuf:"bytes,3,opt,name=red,proto3" json:"red,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
-	XXX_unrecognized     []byte            `json:"-"`
-	XXX_sizecache        int32             `json:"-"`
-}
-
-func (m *WRedDiscardConfig) Reset()         { *m = WRedDiscardConfig{} }
-func (m *WRedDiscardConfig) String() string { return proto.CompactTextString(m) }
-func (*WRedDiscardConfig) ProtoMessage()    {}
-func (*WRedDiscardConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{6}
-}
-
-func (m *WRedDiscardConfig) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_WRedDiscardConfig.Unmarshal(m, b)
-}
-func (m *WRedDiscardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_WRedDiscardConfig.Marshal(b, m, deterministic)
-}
-func (m *WRedDiscardConfig) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WRedDiscardConfig.Merge(m, src)
-}
-func (m *WRedDiscardConfig) XXX_Size() int {
-	return xxx_messageInfo_WRedDiscardConfig.Size(m)
-}
-func (m *WRedDiscardConfig) XXX_DiscardUnknown() {
-	xxx_messageInfo_WRedDiscardConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WRedDiscardConfig proto.InternalMessageInfo
-
-func (m *WRedDiscardConfig) GetGreen() *RedDiscardConfig {
-	if m != nil {
-		return m.Green
-	}
-	return nil
-}
-
-func (m *WRedDiscardConfig) GetYellow() *RedDiscardConfig {
-	if m != nil {
-		return m.Yellow
-	}
-	return nil
-}
-
-func (m *WRedDiscardConfig) GetRed() *RedDiscardConfig {
-	if m != nil {
-		return m.Red
-	}
-	return nil
-}
-
-type DiscardConfig struct {
-	DiscardPolicy DiscardPolicy `protobuf:"varint,1,opt,name=discard_policy,json=discardPolicy,proto3,enum=tech_profile.DiscardPolicy" json:"discard_policy,omitempty"`
-	// Types that are valid to be assigned to DiscardConfig:
-	//	*DiscardConfig_TailDropDiscardConfig
-	//	*DiscardConfig_RedDiscardConfig
-	//	*DiscardConfig_WredDiscardConfig
-	DiscardConfig        isDiscardConfig_DiscardConfig `protobuf_oneof:"discard_config"`
-	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
-	XXX_unrecognized     []byte                        `json:"-"`
-	XXX_sizecache        int32                         `json:"-"`
-}
-
-func (m *DiscardConfig) Reset()         { *m = DiscardConfig{} }
-func (m *DiscardConfig) String() string { return proto.CompactTextString(m) }
-func (*DiscardConfig) ProtoMessage()    {}
-func (*DiscardConfig) Descriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{7}
-}
-
-func (m *DiscardConfig) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_DiscardConfig.Unmarshal(m, b)
-}
-func (m *DiscardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_DiscardConfig.Marshal(b, m, deterministic)
-}
-func (m *DiscardConfig) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DiscardConfig.Merge(m, src)
-}
-func (m *DiscardConfig) XXX_Size() int {
-	return xxx_messageInfo_DiscardConfig.Size(m)
-}
-func (m *DiscardConfig) XXX_DiscardUnknown() {
-	xxx_messageInfo_DiscardConfig.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DiscardConfig proto.InternalMessageInfo
-
-func (m *DiscardConfig) GetDiscardPolicy() DiscardPolicy {
-	if m != nil {
-		return m.DiscardPolicy
-	}
-	return DiscardPolicy_TailDrop
-}
-
-type isDiscardConfig_DiscardConfig interface {
-	isDiscardConfig_DiscardConfig()
-}
-
-type DiscardConfig_TailDropDiscardConfig struct {
-	TailDropDiscardConfig *TailDropDiscardConfig `protobuf:"bytes,2,opt,name=tail_drop_discard_config,json=tailDropDiscardConfig,proto3,oneof"`
-}
-
-type DiscardConfig_RedDiscardConfig struct {
-	RedDiscardConfig *RedDiscardConfig `protobuf:"bytes,3,opt,name=red_discard_config,json=redDiscardConfig,proto3,oneof"`
-}
-
-type DiscardConfig_WredDiscardConfig struct {
-	WredDiscardConfig *WRedDiscardConfig `protobuf:"bytes,4,opt,name=wred_discard_config,json=wredDiscardConfig,proto3,oneof"`
-}
-
-func (*DiscardConfig_TailDropDiscardConfig) isDiscardConfig_DiscardConfig() {}
-
-func (*DiscardConfig_RedDiscardConfig) isDiscardConfig_DiscardConfig() {}
-
-func (*DiscardConfig_WredDiscardConfig) isDiscardConfig_DiscardConfig() {}
-
-func (m *DiscardConfig) GetDiscardConfig() isDiscardConfig_DiscardConfig {
-	if m != nil {
-		return m.DiscardConfig
-	}
-	return nil
-}
-
-func (m *DiscardConfig) GetTailDropDiscardConfig() *TailDropDiscardConfig {
-	if x, ok := m.GetDiscardConfig().(*DiscardConfig_TailDropDiscardConfig); ok {
-		return x.TailDropDiscardConfig
-	}
-	return nil
-}
-
-func (m *DiscardConfig) GetRedDiscardConfig() *RedDiscardConfig {
-	if x, ok := m.GetDiscardConfig().(*DiscardConfig_RedDiscardConfig); ok {
-		return x.RedDiscardConfig
-	}
-	return nil
-}
-
-func (m *DiscardConfig) GetWredDiscardConfig() *WRedDiscardConfig {
-	if x, ok := m.GetDiscardConfig().(*DiscardConfig_WredDiscardConfig); ok {
-		return x.WredDiscardConfig
-	}
-	return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*DiscardConfig) XXX_OneofWrappers() []interface{} {
-	return []interface{}{
-		(*DiscardConfig_TailDropDiscardConfig)(nil),
-		(*DiscardConfig_RedDiscardConfig)(nil),
-		(*DiscardConfig_WredDiscardConfig)(nil),
-	}
-}
-
-type TrafficQueue struct {
-	Direction            Direction        `protobuf:"varint,1,opt,name=direction,proto3,enum=tech_profile.Direction" json:"direction,omitempty"`
-	GemportId            uint32           `protobuf:"fixed32,2,opt,name=gemport_id,json=gemportId,proto3" json:"gemport_id,omitempty"`
-	PbitMap              string           `protobuf:"bytes,3,opt,name=pbit_map,json=pbitMap,proto3" json:"pbit_map,omitempty"`
-	AesEncryption        bool             `protobuf:"varint,4,opt,name=aes_encryption,json=aesEncryption,proto3" json:"aes_encryption,omitempty"`
-	SchedPolicy          SchedulingPolicy `protobuf:"varint,5,opt,name=sched_policy,json=schedPolicy,proto3,enum=tech_profile.SchedulingPolicy" json:"sched_policy,omitempty"`
-	Priority             uint32           `protobuf:"fixed32,6,opt,name=priority,proto3" json:"priority,omitempty"`
-	Weight               uint32           `protobuf:"fixed32,7,opt,name=weight,proto3" json:"weight,omitempty"`
-	DiscardPolicy        DiscardPolicy    `protobuf:"varint,8,opt,name=discard_policy,json=discardPolicy,proto3,enum=tech_profile.DiscardPolicy" json:"discard_policy,omitempty"`
-	DiscardConfig        *DiscardConfig   `protobuf:"bytes,9,opt,name=discard_config,json=discardConfig,proto3" json:"discard_config,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
-	XXX_unrecognized     []byte           `json:"-"`
-	XXX_sizecache        int32            `json:"-"`
-}
-
-func (m *TrafficQueue) Reset()         { *m = TrafficQueue{} }
-func (m *TrafficQueue) String() string { return proto.CompactTextString(m) }
-func (*TrafficQueue) ProtoMessage()    {}
-func (*TrafficQueue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{8}
-}
-
-func (m *TrafficQueue) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_TrafficQueue.Unmarshal(m, b)
-}
-func (m *TrafficQueue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_TrafficQueue.Marshal(b, m, deterministic)
-}
-func (m *TrafficQueue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TrafficQueue.Merge(m, src)
-}
-func (m *TrafficQueue) XXX_Size() int {
-	return xxx_messageInfo_TrafficQueue.Size(m)
-}
-func (m *TrafficQueue) XXX_DiscardUnknown() {
-	xxx_messageInfo_TrafficQueue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TrafficQueue proto.InternalMessageInfo
-
-func (m *TrafficQueue) GetDirection() Direction {
-	if m != nil {
-		return m.Direction
-	}
-	return Direction_UPSTREAM
-}
-
-func (m *TrafficQueue) GetGemportId() uint32 {
-	if m != nil {
-		return m.GemportId
-	}
-	return 0
-}
-
-func (m *TrafficQueue) GetPbitMap() string {
-	if m != nil {
-		return m.PbitMap
-	}
-	return ""
-}
-
-func (m *TrafficQueue) GetAesEncryption() bool {
-	if m != nil {
-		return m.AesEncryption
-	}
-	return false
-}
-
-func (m *TrafficQueue) GetSchedPolicy() SchedulingPolicy {
-	if m != nil {
-		return m.SchedPolicy
-	}
-	return SchedulingPolicy_WRR
-}
-
-func (m *TrafficQueue) GetPriority() uint32 {
-	if m != nil {
-		return m.Priority
-	}
-	return 0
-}
-
-func (m *TrafficQueue) GetWeight() uint32 {
-	if m != nil {
-		return m.Weight
-	}
-	return 0
-}
-
-func (m *TrafficQueue) GetDiscardPolicy() DiscardPolicy {
-	if m != nil {
-		return m.DiscardPolicy
-	}
-	return DiscardPolicy_TailDrop
-}
-
-func (m *TrafficQueue) GetDiscardConfig() *DiscardConfig {
-	if m != nil {
-		return m.DiscardConfig
-	}
-	return nil
-}
-
-type TrafficQueues struct {
-	IntfId               uint32          `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
-	OnuId                uint32          `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
-	UniId                uint32          `protobuf:"fixed32,4,opt,name=uni_id,json=uniId,proto3" json:"uni_id,omitempty"`
-	PortNo               uint32          `protobuf:"fixed32,5,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
-	TrafficQueues        []*TrafficQueue `protobuf:"bytes,6,rep,name=traffic_queues,json=trafficQueues,proto3" json:"traffic_queues,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *TrafficQueues) Reset()         { *m = TrafficQueues{} }
-func (m *TrafficQueues) String() string { return proto.CompactTextString(m) }
-func (*TrafficQueues) ProtoMessage()    {}
-func (*TrafficQueues) Descriptor() ([]byte, []int) {
-	return fileDescriptor_d019a68bffe14cae, []int{9}
-}
-
-func (m *TrafficQueues) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_TrafficQueues.Unmarshal(m, b)
-}
-func (m *TrafficQueues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_TrafficQueues.Marshal(b, m, deterministic)
-}
-func (m *TrafficQueues) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TrafficQueues.Merge(m, src)
-}
-func (m *TrafficQueues) XXX_Size() int {
-	return xxx_messageInfo_TrafficQueues.Size(m)
-}
-func (m *TrafficQueues) XXX_DiscardUnknown() {
-	xxx_messageInfo_TrafficQueues.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TrafficQueues proto.InternalMessageInfo
-
-func (m *TrafficQueues) GetIntfId() uint32 {
-	if m != nil {
-		return m.IntfId
-	}
-	return 0
-}
-
-func (m *TrafficQueues) GetOnuId() uint32 {
-	if m != nil {
-		return m.OnuId
-	}
-	return 0
-}
-
-func (m *TrafficQueues) GetUniId() uint32 {
-	if m != nil {
-		return m.UniId
-	}
-	return 0
-}
-
-func (m *TrafficQueues) GetPortNo() uint32 {
-	if m != nil {
-		return m.PortNo
-	}
-	return 0
-}
-
-func (m *TrafficQueues) GetTrafficQueues() []*TrafficQueue {
-	if m != nil {
-		return m.TrafficQueues
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterEnum("tech_profile.Direction", Direction_name, Direction_value)
-	proto.RegisterEnum("tech_profile.SchedulingPolicy", SchedulingPolicy_name, SchedulingPolicy_value)
-	proto.RegisterEnum("tech_profile.AdditionalBW", AdditionalBW_name, AdditionalBW_value)
-	proto.RegisterEnum("tech_profile.DiscardPolicy", DiscardPolicy_name, DiscardPolicy_value)
-	proto.RegisterEnum("tech_profile.InferredAdditionBWIndication", InferredAdditionBWIndication_name, InferredAdditionBWIndication_value)
-	proto.RegisterType((*SchedulerConfig)(nil), "tech_profile.SchedulerConfig")
-	proto.RegisterType((*TrafficShapingInfo)(nil), "tech_profile.TrafficShapingInfo")
-	proto.RegisterType((*TrafficScheduler)(nil), "tech_profile.TrafficScheduler")
-	proto.RegisterType((*TrafficSchedulers)(nil), "tech_profile.TrafficSchedulers")
-	proto.RegisterType((*TailDropDiscardConfig)(nil), "tech_profile.TailDropDiscardConfig")
-	proto.RegisterType((*RedDiscardConfig)(nil), "tech_profile.RedDiscardConfig")
-	proto.RegisterType((*WRedDiscardConfig)(nil), "tech_profile.WRedDiscardConfig")
-	proto.RegisterType((*DiscardConfig)(nil), "tech_profile.DiscardConfig")
-	proto.RegisterType((*TrafficQueue)(nil), "tech_profile.TrafficQueue")
-	proto.RegisterType((*TrafficQueues)(nil), "tech_profile.TrafficQueues")
-}
-
-func init() { proto.RegisterFile("voltha_protos/tech_profile.proto", fileDescriptor_d019a68bffe14cae) }
-
-var fileDescriptor_d019a68bffe14cae = []byte{
-	// 1103 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdd, 0x6e, 0x1b, 0x45,
-	0x14, 0xf6, 0xda, 0x8d, 0x7f, 0x4e, 0x6c, 0x77, 0x33, 0x25, 0xd4, 0xa4, 0x0d, 0x04, 0x97, 0xaa,
-	0x91, 0x11, 0x09, 0xa4, 0xd0, 0x9b, 0x22, 0x55, 0x76, 0x13, 0x29, 0x96, 0x68, 0x9a, 0x4e, 0x82,
-	0x2c, 0x71, 0xc1, 0x6a, 0xbc, 0x33, 0xb6, 0x47, 0x5a, 0xcf, 0x2c, 0xb3, 0x63, 0x9c, 0xf4, 0x8a,
-	0x1b, 0xde, 0x82, 0x5b, 0x5e, 0x00, 0x6e, 0x10, 0x4f, 0xc4, 0x0b, 0x70, 0x8f, 0x66, 0x76, 0xd7,
-	0xf6, 0xda, 0x26, 0x85, 0x0a, 0xee, 0xe6, 0x7c, 0xfb, 0xcd, 0x99, 0xf3, 0xcd, 0xf9, 0xd9, 0x81,
-	0xbd, 0xef, 0x65, 0xa0, 0x47, 0xc4, 0x0b, 0x95, 0xd4, 0x32, 0x3a, 0xd4, 0xcc, 0x1f, 0x99, 0xf5,
-	0x80, 0x07, 0xec, 0xc0, 0x62, 0xa8, 0xba, 0x88, 0xed, 0xdc, 0x1f, 0x4a, 0x39, 0x0c, 0xd8, 0x21,
-	0x09, 0xf9, 0x21, 0x11, 0x42, 0x6a, 0xa2, 0xb9, 0x14, 0x51, 0xcc, 0x6d, 0xfe, 0x90, 0x87, 0xdb,
-	0x17, 0xfe, 0x88, 0xd1, 0x49, 0xc0, 0xd4, 0x73, 0x29, 0x06, 0x7c, 0x88, 0xbe, 0x80, 0x0a, 0xe5,
-	0x8a, 0xf9, 0x86, 0xd7, 0x70, 0xf6, 0x9c, 0xfd, 0xfa, 0xd1, 0xdd, 0x83, 0xcc, 0x39, 0xc7, 0xe9,
-	0x67, 0x3c, 0x67, 0xa2, 0x67, 0x50, 0x23, 0x94, 0x72, 0xb3, 0x26, 0x81, 0xd7, 0x9f, 0x36, 0xf2,
-	0x76, 0xeb, 0x4e, 0x76, 0x6b, 0x7b, 0x46, 0xe9, 0xf4, 0x70, 0x75, 0xbe, 0xa1, 0x33, 0x45, 0x3b,
-	0x50, 0x0e, 0x15, 0x97, 0x8a, 0xeb, 0xeb, 0x46, 0x61, 0xcf, 0xd9, 0x2f, 0xe1, 0x99, 0x8d, 0xde,
-	0x85, 0xe2, 0x94, 0xf1, 0xe1, 0x48, 0x37, 0x6e, 0xd9, 0x2f, 0x89, 0x85, 0xda, 0x50, 0x8d, 0x4c,
-	0xf8, 0x5e, 0x28, 0x03, 0xee, 0x5f, 0x37, 0x36, 0xec, 0x99, 0xef, 0x67, 0xcf, 0x4c, 0x04, 0x72,
-	0x31, 0x3c, 0xb7, 0x2c, 0xbc, 0x69, 0xf7, 0xc4, 0x46, 0xf3, 0x37, 0x07, 0xd0, 0xa5, 0x22, 0x83,
-	0x01, 0xf7, 0x2f, 0x46, 0x24, 0xe4, 0x62, 0xd8, 0x15, 0x03, 0x89, 0x5c, 0x28, 0xf8, 0x5c, 0x59,
-	0xfd, 0x25, 0x6c, 0x96, 0x16, 0xe9, 0x47, 0x56, 0x96, 0x41, 0xfa, 0x91, 0x41, 0x42, 0xae, 0x92,
-	0x60, 0xcd, 0xd2, 0x22, 0xfd, 0x28, 0x09, 0xd2, 0x2c, 0x0d, 0x32, 0xe4, 0xca, 0x06, 0x56, 0xc2,
-	0x66, 0x89, 0x4e, 0x01, 0x08, 0xa5, 0x5e, 0x7f, 0xea, 0x71, 0x41, 0x1b, 0x45, 0x1b, 0x71, 0x2b,
-	0x1b, 0x71, 0x57, 0x0c, 0x98, 0x52, 0x8c, 0xa6, 0xb7, 0xd5, 0xe9, 0x75, 0x05, 0xe5, 0xbe, 0x4d,
-	0x1d, 0x2e, 0x13, 0x4a, 0x3b, 0xd3, 0xae, 0xa0, 0xcd, 0x3f, 0x1d, 0x70, 0xd3, 0xd0, 0xd3, 0x24,
-	0xbe, 0x6d, 0xfa, 0xde, 0x83, 0x32, 0x09, 0x02, 0xe9, 0x7b, 0x9c, 0x26, 0x12, 0x4b, 0xd6, 0xee,
-	0x52, 0xf4, 0x14, 0x2a, 0x51, 0xea, 0xde, 0x8a, 0xdd, 0x3c, 0xda, 0x5d, 0x7b, 0xc3, 0x69, 0x09,
-	0xe1, 0x39, 0x1f, 0x61, 0x78, 0x47, 0xc7, 0x21, 0x7a, 0x51, 0x7c, 0xbd, 0x1e, 0x17, 0x03, 0x69,
-	0xaf, 0x68, 0xf3, 0x68, 0x2f, 0xeb, 0x67, 0x35, 0x0f, 0x18, 0xe9, 0x15, 0xac, 0xf9, 0xbb, 0x03,
-	0x5b, 0xcb, 0xba, 0x23, 0x74, 0x17, 0x4a, 0x5c, 0xe8, 0x81, 0x11, 0x10, 0x67, 0xad, 0x68, 0xcc,
-	0x2e, 0x45, 0xdb, 0x50, 0x94, 0x62, 0x32, 0x17, 0xb6, 0x21, 0xc5, 0x24, 0x86, 0x27, 0x82, 0x1b,
-	0x38, 0x4e, 0xd7, 0xc6, 0x44, 0xf0, 0x2e, 0x35, 0x6e, 0x42, 0xa9, 0xb4, 0x27, 0x64, 0x92, 0xb4,
-	0xa2, 0x31, 0xcf, 0x24, 0x3a, 0x81, 0xfa, 0x4c, 0x89, 0x39, 0x35, 0x6a, 0x14, 0xf6, 0x0a, 0xfb,
-	0x9b, 0xcb, 0xd5, 0xb6, 0x1c, 0x18, 0xae, 0xe9, 0x05, 0x24, 0x6a, 0x3e, 0x81, 0xed, 0x4b, 0xc2,
-	0x83, 0x63, 0x25, 0xc3, 0x63, 0x1e, 0xf9, 0x44, 0xd1, 0xa4, 0xef, 0x76, 0x01, 0xbe, 0x9b, 0xb0,
-	0x09, 0xf3, 0x22, 0xfe, 0x9a, 0x25, 0x12, 0x2a, 0x16, 0xb9, 0xe0, 0xaf, 0x59, 0xf3, 0x47, 0x07,
-	0x5c, 0xcc, 0x68, 0x76, 0xcf, 0x03, 0xa8, 0x8d, 0xb9, 0xf0, 0xf4, 0x48, 0xb1, 0x68, 0x24, 0x83,
-	0x54, 0x79, 0x75, 0xcc, 0xc5, 0x65, 0x8a, 0x59, 0x12, 0xb9, 0x5a, 0x20, 0xe5, 0x13, 0x12, 0xb9,
-	0x9a, 0x93, 0x1e, 0xc1, 0x6d, 0x43, 0x0a, 0x95, 0xec, 0x93, 0x3e, 0x0f, 0xe6, 0x4d, 0x58, 0x1f,
-	0x93, 0xab, 0xf3, 0x39, 0xda, 0xfc, 0xd5, 0x81, 0xad, 0xde, 0x4a, 0x20, 0x9f, 0xc3, 0xc6, 0x50,
-	0x31, 0x16, 0x57, 0xdc, 0xca, 0x9d, 0x2c, 0xd3, 0x71, 0x4c, 0x46, 0x4f, 0xa0, 0x78, 0xcd, 0x82,
-	0x40, 0xc6, 0xc3, 0xe2, 0xcd, 0xdb, 0x12, 0x36, 0xfa, 0x14, 0x0a, 0x8a, 0xd1, 0xa4, 0x16, 0xdf,
-	0xb4, 0xc9, 0x50, 0x9b, 0x7f, 0xe4, 0xa1, 0x96, 0x8d, 0xb8, 0x03, 0x75, 0x1a, 0x03, 0xe9, 0xf0,
-	0x88, 0x9b, 0xe5, 0xde, 0x72, 0xb3, 0x58, 0x4e, 0x32, 0x39, 0x6a, 0x74, 0xd1, 0x44, 0xdf, 0x42,
-	0x43, 0x13, 0x1e, 0x78, 0x54, 0xc9, 0xd0, 0x4b, 0xbd, 0xf9, 0xd6, 0x7f, 0xa2, 0xe8, 0xc1, 0x52,
-	0x71, 0xac, 0xcb, 0xfc, 0x69, 0x0e, 0x6f, 0xeb, 0xb5, 0x25, 0x71, 0x06, 0x48, 0x31, 0xba, 0xec,
-	0xf9, 0x1f, 0xc9, 0x3e, 0xcd, 0x61, 0x57, 0x2d, 0x67, 0xe9, 0x15, 0xdc, 0x99, 0xae, 0x71, 0x18,
-	0xf7, 0xe2, 0x07, 0x59, 0x87, 0xbd, 0x35, 0x1e, 0xb7, 0xa6, 0xcb, 0x2e, 0x3b, 0xee, 0xfc, 0x1a,
-	0x63, 0x6f, 0xcd, 0x9f, 0x0b, 0x50, 0x4d, 0x9a, 0xe0, 0x95, 0xa9, 0xde, 0xb7, 0x9d, 0x48, 0xbb,
-	0x00, 0x43, 0x36, 0xb6, 0xbd, 0x38, 0x6b, 0xdd, 0x4a, 0x82, 0x74, 0xa9, 0x19, 0x58, 0x61, 0x9f,
-	0x6b, 0x6f, 0x4c, 0x42, 0x7b, 0x23, 0x15, 0x5c, 0x32, 0xf6, 0x0b, 0x12, 0xa2, 0x87, 0x50, 0x27,
-	0x2c, 0xf2, 0x98, 0xf0, 0xd5, 0x75, 0x68, 0x4f, 0x35, 0x0a, 0xcb, 0xb8, 0x46, 0x58, 0x74, 0x32,
-	0x03, 0xff, 0x83, 0x9f, 0x47, 0xe6, 0x9f, 0x55, 0xfc, 0xdb, 0x7f, 0x56, 0x29, 0xf3, 0xcf, 0x5a,
-	0x2d, 0xbc, 0xf2, 0xbf, 0x2e, 0xbc, 0xce, 0xf2, 0xad, 0x37, 0x2a, 0x36, 0x87, 0xeb, 0x7d, 0x24,
-	0x8d, 0x90, 0xfa, 0x88, 0xcd, 0xe6, 0x2f, 0x0e, 0xd4, 0x16, 0xf3, 0xf4, 0xff, 0x4f, 0xd0, 0xf6,
-	0x7c, 0x82, 0xda, 0xb9, 0x16, 0x35, 0x8a, 0x76, 0x82, 0xee, 0xac, 0x9d, 0xa0, 0x36, 0xa8, 0xd9,
-	0xf4, 0x8c, 0x43, 0x6c, 0x7d, 0x09, 0x95, 0x59, 0xb1, 0xa0, 0x2a, 0x94, 0xbf, 0x3e, 0xbf, 0xb8,
-	0xc4, 0x27, 0xed, 0x17, 0x6e, 0x0e, 0xd5, 0x01, 0x8e, 0x5f, 0xf6, 0xce, 0x12, 0xdb, 0x41, 0x5b,
-	0x50, 0xeb, 0x74, 0x8f, 0xbb, 0xf8, 0xe4, 0xf9, 0x65, 0xf7, 0xe5, 0x59, 0xfb, 0x2b, 0x37, 0xdf,
-	0x7a, 0x0a, 0xee, 0x72, 0x3e, 0x51, 0x09, 0x0a, 0x3d, 0x8c, 0xdd, 0x1c, 0x42, 0x50, 0xbf, 0xd0,
-	0x8a, 0xfb, 0xfa, 0x3c, 0xc9, 0xa0, 0xeb, 0x20, 0x80, 0xe2, 0xe9, 0x75, 0x5f, 0x71, 0xea, 0xe6,
-	0x5b, 0x02, 0xaa, 0x8b, 0xaf, 0x17, 0xb4, 0x0d, 0x5b, 0x8b, 0xb6, 0x77, 0x26, 0x05, 0x73, 0x73,
-	0xe8, 0x0e, 0xdc, 0xce, 0xc2, 0x6d, 0xd7, 0x41, 0xf7, 0xe0, 0x6e, 0x06, 0xec, 0xb0, 0x48, 0x9f,
-	0x0c, 0x06, 0x52, 0x69, 0x37, 0xbf, 0xe2, 0xa8, 0x3d, 0xd1, 0xd2, 0x2d, 0xb4, 0x9e, 0xcd, 0x26,
-	0x56, 0x12, 0x69, 0x15, 0xca, 0xe9, 0xfc, 0x70, 0x73, 0xa8, 0x06, 0x95, 0xde, 0xcc, 0x74, 0x8c,
-	0x0c, 0xcc, 0xa8, 0x9b, 0x47, 0x65, 0xb8, 0x65, 0x5a, 0xd7, 0x2d, 0xb4, 0x7e, 0x72, 0xe0, 0xfe,
-	0x4d, 0x2f, 0x09, 0xf4, 0x10, 0x3e, 0xbc, 0xe9, 0x7b, 0xaa, 0x68, 0x1f, 0x3e, 0xba, 0x91, 0xd6,
-	0x8e, 0xa2, 0x89, 0x62, 0xd4, 0x75, 0xd0, 0xc7, 0xf0, 0xe8, 0x46, 0xe6, 0xa2, 0xec, 0xce, 0xe3,
-	0x6f, 0x3e, 0x1b, 0x72, 0x3d, 0x9a, 0xf4, 0x0f, 0x7c, 0x39, 0x3e, 0x94, 0x21, 0x13, 0xbe, 0x54,
-	0xf4, 0x30, 0x7e, 0xdf, 0x7e, 0x92, 0xbc, 0x6f, 0x87, 0x32, 0xf3, 0xc4, 0xed, 0x17, 0x2d, 0xfe,
-	0xf8, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x7b, 0x91, 0x72, 0x07, 0x0b, 0x00, 0x00,
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 251fedd..c0ad304 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -6,8 +6,6 @@
 github.com/armon/go-metrics
 # github.com/bclicn/color v0.0.0-20180711051946-108f2023dc84
 github.com/bclicn/color
-# github.com/boljen/go-bitmap v0.0.0-20151001105940-23cd2fb0ce7d
-github.com/boljen/go-bitmap
 # github.com/bsm/sarama-cluster v2.1.15+incompatible
 github.com/bsm/sarama-cluster
 # github.com/cevaris/ordered_map v0.0.0-20190319150403-3adeae072e73
@@ -29,12 +27,12 @@
 github.com/gogo/protobuf/gogoproto
 github.com/gogo/protobuf/protoc-gen-gogo/descriptor
 # github.com/golang/protobuf v1.3.2
-github.com/golang/protobuf/proto
+github.com/golang/protobuf/ptypes/empty
 github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
-github.com/golang/protobuf/ptypes/empty
-github.com/golang/protobuf/descriptor
+github.com/golang/protobuf/proto
 github.com/golang/protobuf/protoc-gen-go/descriptor
+github.com/golang/protobuf/descriptor
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/timestamp
 # github.com/golang/snappy v0.0.1
@@ -64,12 +62,22 @@
 github.com/mitchellh/go-homedir
 # github.com/mitchellh/mapstructure v1.1.2
 github.com/mitchellh/mapstructure
+# github.com/opencord/voltha-lib-go v0.0.0-20191017201200-e73f91e306e9
+github.com/opencord/voltha-lib-go/pkg/log
+github.com/opencord/voltha-lib-go/pkg/db/kvstore
+github.com/opencord/voltha-lib-go/pkg/grpc
+github.com/opencord/voltha-lib-go/pkg/probe
+github.com/opencord/voltha-lib-go/pkg/version
+github.com/opencord/voltha-lib-go/pkg/db/model
+github.com/opencord/voltha-lib-go/pkg/kafka
+github.com/opencord/voltha-lib-go/pkg/adapters/common
+github.com/opencord/voltha-lib-go/pkg/adapters
+github.com/opencord/voltha-lib-go/pkg/adapters/adapterif
 # github.com/opencord/voltha-protos v1.0.3
-github.com/opencord/voltha-protos/go/inter_container
-github.com/opencord/voltha-protos/go/openflow_13
-github.com/opencord/voltha-protos/go/voltha
 github.com/opencord/voltha-protos/go/common
-github.com/opencord/voltha-protos/go/tech_profile
+github.com/opencord/voltha-protos/go/voltha
+github.com/opencord/voltha-protos/go/openflow_13
+github.com/opencord/voltha-protos/go/inter_container
 github.com/opencord/voltha-protos/go/omci
 # github.com/pierrec/lz4 v2.3.0+incompatible
 github.com/pierrec/lz4
@@ -116,14 +124,14 @@
 golang.org/x/crypto/pbkdf2
 # golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3
 golang.org/x/net/trace
-golang.org/x/net/proxy
 golang.org/x/net/internal/timeseries
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
-golang.org/x/net/context
-golang.org/x/net/internal/socks
+golang.org/x/net/proxy
 golang.org/x/net/http/httpguts
 golang.org/x/net/idna
+golang.org/x/net/context
+golang.org/x/net/internal/socks
 # golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24
 golang.org/x/sys/unix
 # golang.org/x/text v0.3.2
@@ -135,18 +143,18 @@
 google.golang.org/genproto/googleapis/api/annotations
 google.golang.org/genproto/googleapis/rpc/status
 # google.golang.org/grpc v1.24.0
-google.golang.org/grpc/codes
-google.golang.org/grpc/status
 google.golang.org/grpc
-google.golang.org/grpc/credentials
+google.golang.org/grpc/codes
 google.golang.org/grpc/metadata
-google.golang.org/grpc/internal
+google.golang.org/grpc/status
 google.golang.org/grpc/balancer
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/connectivity
+google.golang.org/grpc/credentials
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/proto
 google.golang.org/grpc/grpclog
+google.golang.org/grpc/internal
 google.golang.org/grpc/internal/backoff
 google.golang.org/grpc/internal/balancerload
 google.golang.org/grpc/internal/binarylog
@@ -164,8 +172,8 @@
 google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/tap
-google.golang.org/grpc/credentials/internal
 google.golang.org/grpc/balancer/base
+google.golang.org/grpc/credentials/internal
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
 google.golang.org/grpc/internal/syscall
 # gopkg.in/jcmturner/aescts.v1 v1.0.1