VOL-1596 Add support for handling multicast groups in the OpenOLT adapter.
VOL-1595 Add support for handling multicast flows in the OpenOLT adapter.

Depends on the voltha-protos change from the patch below:
https://gerrit.opencord.org/#/c/16690/
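
Alongside the multicast handling, this change bumps the vendored voltha-lib-go
and voltha-protos trees from v2 to v3. The v3 protos rename the generated enum
types (for example OperStatus_OperStatus becomes OperStatus_Types, and
ConnectStatus_ConnectStatus becomes ConnectStatus_Types), and the shared
library packages switch from the global log calls to a package-level logger.
A minimal sketch of a caller against the v3 CoreProxy interface after the bump
(the package name and markOltActive helper are illustrative only, not part of
this patch):

    package example

    import (
    	"context"

    	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
    	"github.com/opencord/voltha-protos/v3/go/voltha"
    )

    // markOltActive shows the renamed v3 enum types in use: DeviceStateUpdate
    // now takes voltha.ConnectStatus_Types and voltha.OperStatus_Types values
    // instead of the old v2 nested enum type names.
    func markOltActive(ctx context.Context, cp adapterif.CoreProxy, deviceID string) error {
    	return cp.DeviceStateUpdate(ctx, deviceID,
    		voltha.ConnectStatus_REACHABLE, // a voltha.ConnectStatus_Types value
    		voltha.OperStatus_ACTIVE)       // a voltha.OperStatus_Types value
    }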

Change-Id: I1cc9900bd6400bb31aed11beda674138838a21d2
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/README.md b/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/README.md
deleted file mode 100644
index 13479f8..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/README.md
+++ /dev/null
@@ -1,10 +0,0 @@
-## How to Build and Run a Voltha Go language Adapter
-
-This directory is a repo for all voltha adapters written in Go language.  At this time, the simulated_olt and 
-simulated_onu adapters are the only adapters using the Go language.  These adapters provide basic capabilities
-which will be used for high availability and capacity testing.
-
-### Building and running the Simulated OLT and ONU Adapters
-
-Please refer to the ```BUILD.md``` file under the voltha-go repo
-
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif/adapter_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/adapter_proxy_if.go
similarity index 93%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif/adapter_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/adapter_proxy_if.go
index 8197170..de5cfc0 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif/adapter_proxy_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/adapter_proxy_if.go
@@ -20,7 +20,7 @@
 	"context"
 
 	"github.com/golang/protobuf/proto"
-	ic "github.com/opencord/voltha-protos/v2/go/inter_container"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
 )
 
 // AdapterProxy interface for AdapterProxy implementation.
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif/core_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go
similarity index 90%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif/core_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go
index c7af374..dbf3418 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif/core_proxy_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go
@@ -19,7 +19,7 @@
 import (
 	"context"
 
-	"github.com/opencord/voltha-protos/v2/go/voltha"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
 // CoreProxy interface for voltha-go coreproxy.
@@ -32,10 +32,10 @@
 	RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error
 	DeviceUpdate(ctx context.Context, device *voltha.Device) error
 	PortCreated(ctx context.Context, deviceID string, port *voltha.Port) error
-	PortsStateUpdate(ctx context.Context, deviceID string, operStatus voltha.OperStatus_OperStatus) error
+	PortsStateUpdate(ctx context.Context, deviceID string, operStatus voltha.OperStatus_Types) error
 	DeleteAllPorts(ctx context.Context, deviceID string) error
 	DeviceStateUpdate(ctx context.Context, deviceID string,
-		connStatus voltha.ConnectStatus_ConnectStatus, operStatus voltha.OperStatus_OperStatus) error
+		connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error
 
 	DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error
 	ChildDeviceDetected(ctx context.Context, parentDeviceID string, parentPortNo int,
@@ -49,5 +49,5 @@
 	SendPacketIn(ctx context.Context, deviceID string, port uint32, pktPayload []byte) error
 	DeviceReasonUpdate(ctx context.Context, deviceID string, deviceReason string) error
 	PortStateUpdate(ctx context.Context, deviceID string, pType voltha.Port_PortType, portNo uint32,
-		operStatus voltha.OperStatus_OperStatus) error
+		operStatus voltha.OperStatus_Types) error
 }
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif/events_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go
similarity index 82%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif/events_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go
index b8ea9d8..c144935 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif/events_proxy_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go
@@ -17,7 +17,7 @@
 package adapterif
 
 import (
-	"github.com/opencord/voltha-protos/v2/go/voltha"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
 // EventProxy interface for eventproxy
@@ -33,7 +33,7 @@
 )
 
 type (
-	EventType        = voltha.EventType_EventType
-	EventCategory    = voltha.EventCategory_EventCategory
-	EventSubCategory = voltha.EventSubCategory_EventSubCategory
+	EventType        = voltha.EventType_Types
+	EventCategory    = voltha.EventCategory_Types
+	EventSubCategory = voltha.EventSubCategory_Types
 )
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
similarity index 82%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/adapter_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
index 7b09a1f..b302214 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/adapter_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
@@ -17,14 +17,15 @@
 
 import (
 	"context"
+	"time"
+
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/any"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-lib-go/v2/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
-	ic "github.com/opencord/voltha-protos/v2/go/inter_container"
-	"time"
+	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
 )
 
 type AdapterProxy struct {
@@ -38,7 +39,7 @@
 	proxy.kafkaICProxy = kafkaProxy
 	proxy.adapterTopic = adapterTopic
 	proxy.coreTopic = coreTopic
-	log.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+	logger.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
 	return &proxy
 }
 
@@ -50,14 +51,14 @@
 	toDeviceId string,
 	proxyDeviceId string,
 	messageId string) error {
-	log.Debugw("sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
+	logger.Debugw("sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
 		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
 
 	//Marshal the message
 	var marshalledMsg *any.Any
 	var err error
 	if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
-		log.Warnw("cannot-marshal-msg", log.Fields{"error": err})
+		logger.Warnw("cannot-marshal-msg", log.Fields{"error": err})
 		return err
 	}
 
@@ -91,6 +92,6 @@
 	rpc := "process_inter_adapter_message"
 
 	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
-	log.Debugw("inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	logger.Debugw("inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
 	return unPackResponse(rpc, "", success, result)
 }
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
similarity index 69%
copy from vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go
copy to vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
index 0851ede..acf818c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2020-present Open Networking Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,21 +13,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package db
+package common
 
 import (
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
 const (
-	logLevel = log.FatalLevel
+	logLevel = log.ErrorLevel
 )
 
-// Unit test initialization. This init() function handles all unit tests in
-// the current directory.
+var logger log.Logger
+
 func init() {
 	// Setup this package so that it's log level can be modified at run time
-	_, err := log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "db"})
+	var err error
+	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "common"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
similarity index 79%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/core_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
index a69d9b4..9b46c28 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/core_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
@@ -21,10 +21,10 @@
 
 	"github.com/golang/protobuf/ptypes"
 	a "github.com/golang/protobuf/ptypes/any"
-	"github.com/opencord/voltha-lib-go/v2/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
-	ic "github.com/opencord/voltha-protos/v2/go/inter_container"
-	"github.com/opencord/voltha-protos/v2/go/voltha"
+	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -44,7 +44,7 @@
 	proxy.coreTopic = coreTopic
 	proxy.deviceIdCoreMap = make(map[string]string)
 	proxy.lockDeviceIdCoreMap = sync.RWMutex{}
-	log.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+	logger.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
 
 	return &proxy
 }
@@ -56,9 +56,9 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(response, unpackResult); err != nil {
-			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		log.Debugw("response", log.Fields{"rpc": rpc, "deviceId": deviceId, "success": success, "error": err})
+		logger.Debugw("response", log.Fields{"rpc": rpc, "deviceId": deviceId, "success": success, "error": err})
 		// TODO:  Need to get the real error code
 		return status.Errorf(codes.Canceled, "%s", unpackResult.Reason)
 	}
@@ -94,7 +94,7 @@
 }
 
 func (ap *CoreProxy) RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error {
-	log.Debugw("registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
+	logger.Debugw("registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
 	rpc := "Register"
 	topic := kafka.Topic{Name: ap.coreTopic}
 	replyToTopic := ap.getAdapterTopic()
@@ -109,12 +109,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, "", args...)
-	log.Debugw("Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	logger.Debugw("Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
 	return unPackResponse(rpc, "", success, result)
 }
 
 func (ap *CoreProxy) DeviceUpdate(ctx context.Context, device *voltha.Device) error {
-	log.Debugw("DeviceUpdate", log.Fields{"deviceId": device.Id})
+	logger.Debugw("DeviceUpdate", log.Fields{"deviceId": device.Id})
 	rpc := "DeviceUpdate"
 	toTopic := ap.getCoreTopic(device.Id)
 	args := make([]*kafka.KVArg, 1)
@@ -125,12 +125,12 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, device.Id, args...)
-	log.Debugw("DeviceUpdate-response", log.Fields{"deviceId": device.Id, "success": success})
+	logger.Debugw("DeviceUpdate-response", log.Fields{"deviceId": device.Id, "success": success})
 	return unPackResponse(rpc, device.Id, success, result)
 }
 
 func (ap *CoreProxy) PortCreated(ctx context.Context, deviceId string, port *voltha.Port) error {
-	log.Debugw("PortCreated", log.Fields{"portNo": port.PortNo})
+	logger.Debugw("PortCreated", log.Fields{"portNo": port.PortNo})
 	rpc := "PortCreated"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -149,11 +149,11 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	log.Debugw("PortCreated-response", log.Fields{"deviceId": deviceId, "success": success})
+	logger.Debugw("PortCreated-response", log.Fields{"deviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
 
-func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, operStatus voltha.OperStatus_OperStatus) error {
+func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, operStatus voltha.OperStatus_Types) error {
 	log.Debugw("PortsStateUpdate", log.Fields{"deviceId": deviceId})
 	rpc := "PortsStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
@@ -175,12 +175,12 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	log.Debugw("PortsStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+	logger.Debugw("PortsStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DeleteAllPorts(ctx context.Context, deviceId string) error {
-	log.Debugw("DeleteAllPorts", log.Fields{"deviceId": deviceId})
+	logger.Debugw("DeleteAllPorts", log.Fields{"deviceId": deviceId})
 	rpc := "DeleteAllPorts"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -196,12 +196,12 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	log.Debugw("DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
+	logger.Debugw("DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
-	connStatus voltha.ConnectStatus_ConnectStatus, operStatus voltha.OperStatus_OperStatus) error {
+	connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error {
 	log.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
 	rpc := "DeviceStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
@@ -227,13 +227,13 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	log.Debugw("DeviceStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+	logger.Debugw("DeviceStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) ChildDeviceDetected(ctx context.Context, parentDeviceId string, parentPortNo int,
 	childDeviceType string, channelId int, vendorId string, serialNumber string, onuId int64) (*voltha.Device, error) {
-	log.Debugw("ChildDeviceDetected", log.Fields{"pDeviceId": parentDeviceId, "channelId": channelId})
+	logger.Debugw("ChildDeviceDetected", log.Fields{"pDeviceId": parentDeviceId, "channelId": channelId})
 	rpc := "ChildDeviceDetected"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -278,12 +278,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	log.Debugw("ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw("ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
 		volthaDevice := &voltha.Device{}
 		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 			return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
 		}
 		return volthaDevice, nil
@@ -291,9 +291,9 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		log.Debugw("ChildDeviceDetected-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		logger.Debugw("ChildDeviceDetected-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
 		// TODO: Need to get the real error code
 		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
 	}
@@ -301,7 +301,7 @@
 }
 
 func (ap *CoreProxy) ChildDevicesLost(ctx context.Context, parentDeviceId string) error {
-	log.Debugw("ChildDevicesLost", log.Fields{"pDeviceId": parentDeviceId})
+	logger.Debugw("ChildDevicesLost", log.Fields{"pDeviceId": parentDeviceId})
 	rpc := "ChildDevicesLost"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -316,12 +316,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	log.Debugw("ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw("ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 	return unPackResponse(rpc, parentDeviceId, success, result)
 }
 
 func (ap *CoreProxy) ChildDevicesDetected(ctx context.Context, parentDeviceId string) error {
-	log.Debugw("ChildDevicesDetected", log.Fields{"pDeviceId": parentDeviceId})
+	logger.Debugw("ChildDevicesDetected", log.Fields{"pDeviceId": parentDeviceId})
 	rpc := "ChildDevicesDetected"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -336,12 +336,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	log.Debugw("ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw("ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 	return unPackResponse(rpc, parentDeviceId, success, result)
 }
 
 func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
-	log.Debugw("GetDevice", log.Fields{"deviceId": deviceId})
+	logger.Debugw("GetDevice", log.Fields{"deviceId": deviceId})
 	rpc := "GetDevice"
 
 	toTopic := ap.getCoreTopic(parentDeviceId)
@@ -355,12 +355,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	log.Debugw("GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw("GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
 		volthaDevice := &voltha.Device{}
 		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 			return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
 		}
 		return volthaDevice, nil
@@ -368,16 +368,16 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		log.Debugw("GetDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		logger.Debugw("GetDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
 		// TODO:  Need to get the real error code
 		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
 	}
 }
 
 func (ap *CoreProxy) GetChildDevice(ctx context.Context, parentDeviceId string, kwargs map[string]interface{}) (*voltha.Device, error) {
-	log.Debugw("GetChildDevice", log.Fields{"parentDeviceId": parentDeviceId, "kwargs": kwargs})
+	logger.Debugw("GetChildDevice", log.Fields{"parentDeviceId": parentDeviceId, "kwargs": kwargs})
 	rpc := "GetChildDevice"
 
 	toTopic := ap.getCoreTopic(parentDeviceId)
@@ -415,12 +415,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	log.Debugw("GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw("GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
 		volthaDevice := &voltha.Device{}
 		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 			return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
 		}
 		return volthaDevice, nil
@@ -428,16 +428,16 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		log.Debugw("GetChildDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		logger.Debugw("GetChildDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
 		// TODO:  Need to get the real error code
 		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
 	}
 }
 
 func (ap *CoreProxy) GetChildDevices(ctx context.Context, parentDeviceId string) (*voltha.Devices, error) {
-	log.Debugw("GetChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
+	logger.Debugw("GetChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
 	rpc := "GetChildDevices"
 
 	toTopic := ap.getCoreTopic(parentDeviceId)
@@ -451,12 +451,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	log.Debugw("GetChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw("GetChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
 		volthaDevices := &voltha.Devices{}
 		if err := ptypes.UnmarshalAny(result, volthaDevices); err != nil {
-			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 			return nil, status.Errorf(codes.InvalidArgument, "%s", err.Error())
 		}
 		return volthaDevices, nil
@@ -464,16 +464,16 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		log.Debugw("GetChildDevices-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		logger.Debugw("GetChildDevices-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
 		// TODO:  Need to get the real error code
 		return nil, status.Errorf(codes.Internal, "%s", unpackResult.Reason)
 	}
 }
 
 func (ap *CoreProxy) SendPacketIn(ctx context.Context, deviceId string, port uint32, pktPayload []byte) error {
-	log.Debugw("SendPacketIn", log.Fields{"deviceId": deviceId, "port": port, "pktPayload": pktPayload})
+	logger.Debugw("SendPacketIn", log.Fields{"deviceId": deviceId, "port": port, "pktPayload": pktPayload})
 	rpc := "PacketIn"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -497,12 +497,12 @@
 		Value: pkt,
 	}
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	log.Debugw("SendPacketIn-response", log.Fields{"pDeviceId": deviceId, "success": success})
+	logger.Debugw("SendPacketIn-response", log.Fields{"pDeviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DeviceReasonUpdate(ctx context.Context, deviceId string, deviceReason string) error {
-	log.Debugw("DeviceReasonUpdate", log.Fields{"deviceId": deviceId, "deviceReason": deviceReason})
+	logger.Debugw("DeviceReasonUpdate", log.Fields{"deviceId": deviceId, "deviceReason": deviceReason})
 	rpc := "DeviceReasonUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -521,12 +521,12 @@
 		Value: reason,
 	}
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	log.Debugw("DeviceReason-response", log.Fields{"pDeviceId": deviceId, "success": success})
+	logger.Debugw("DeviceReason-response", log.Fields{"pDeviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error {
-	log.Debugw("DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
+	logger.Debugw("DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
 	rpc := "DevicePMConfigUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -539,12 +539,12 @@
 		Value: pmConfigs,
 	}
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
-	log.Debugw("DevicePMConfigUpdate-response", log.Fields{"pDeviceId": pmConfigs.Id, "success": success})
+	logger.Debugw("DevicePMConfigUpdate-response", log.Fields{"pDeviceId": pmConfigs.Id, "success": success})
 	return unPackResponse(rpc, pmConfigs.Id, success, result)
 }
 
 func (ap *CoreProxy) ReconcileChildDevices(ctx context.Context, parentDeviceId string) error {
-	log.Debugw("ReconcileChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
+	logger.Debugw("ReconcileChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
 	rpc := "ReconcileChildDevices"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -556,13 +556,13 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	log.Debugw("ReconcileChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw("ReconcileChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 	return unPackResponse(rpc, parentDeviceId, success, result)
 }
 
 func (ap *CoreProxy) PortStateUpdate(ctx context.Context, deviceId string, pType voltha.Port_PortType, portNum uint32,
-	operStatus voltha.OperStatus_OperStatus) error {
-	log.Debugw("PortStateUpdate", log.Fields{"deviceId": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
+	operStatus voltha.OperStatus_Types) error {
+	logger.Debugw("PortStateUpdate", log.Fields{"deviceId": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
 	rpc := "PortStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -593,6 +593,6 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(nil, rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	log.Debugw("PortStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+	logger.Debugw("PortStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
 	return unPackResponse(rpc, deviceId, success, result)
 }
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
similarity index 82%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/events_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
index ab6b0d0..034de8e 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/events_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
@@ -23,10 +23,10 @@
 	"strings"
 	"time"
 
-	"github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif"
-	"github.com/opencord/voltha-lib-go/v2/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
-	"github.com/opencord/voltha-protos/v2/go/voltha"
+	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
 type EventProxy struct {
@@ -81,7 +81,7 @@
 /* Send out device events*/
 func (ep *EventProxy) SendDeviceEvent(deviceEvent *voltha.DeviceEvent, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
 	if deviceEvent == nil {
-		log.Error("Recieved empty device event")
+		logger.Error("Recieved empty device event")
 		return errors.New("Device event nil")
 	}
 	var event voltha.Event
@@ -90,10 +90,10 @@
 	event.Header = ep.getEventHeader(deviceEvent.DeviceEventName, category, subCategory, voltha.EventType_DEVICE_EVENT, raisedTs)
 	event.EventType = &de
 	if err := ep.sendEvent(&event); err != nil {
-		log.Errorw("Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
+		logger.Errorw("Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
 		return err
 	}
-	log.Infow("Successfully sent device event KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+	logger.Infow("Successfully sent device event KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
 		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
 		"ReportedTs": event.Header.ReportedTs, "ResourceId": deviceEvent.ResourceId, "Context": deviceEvent.Context,
 		"DeviceEventName": deviceEvent.DeviceEventName})
@@ -105,7 +105,7 @@
 // SendKpiEvent is to send kpi events to voltha.event topic
 func (ep *EventProxy) SendKpiEvent(id string, kpiEvent *voltha.KpiEvent2, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
 	if kpiEvent == nil {
-		log.Error("Recieved empty kpi event")
+		logger.Error("Recieved empty kpi event")
 		return errors.New("KPI event nil")
 	}
 	var event voltha.Event
@@ -114,10 +114,10 @@
 	event.Header = ep.getEventHeader(id, category, subCategory, voltha.EventType_KPI_EVENT2, raisedTs)
 	event.EventType = &de
 	if err := ep.sendEvent(&event); err != nil {
-		log.Errorw("Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
+		logger.Errorw("Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
 		return err
 	}
-	log.Infow("Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+	logger.Infow("Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
 		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
 		"ReportedTs": event.Header.ReportedTs, "KpiEventName": "STATS_EVENT"})
 
@@ -131,7 +131,7 @@
 	if err := ep.kafkaClient.Send(event, &ep.eventTopic); err != nil {
 		return err
 	}
-	log.Debugw("Sent event to kafka", log.Fields{"event": event})
+	logger.Debugw("Sent event to kafka", log.Fields{"event": event})
 
 	return nil
 }
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/performance_metrics.go
similarity index 97%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/performance_metrics.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/performance_metrics.go
index bcb45f8..7697c05 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/performance_metrics.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/performance_metrics.go
@@ -17,7 +17,7 @@
 package common
 
 import (
-	"github.com/opencord/voltha-protos/v2/go/voltha"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
 type PmMetrics struct {
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
similarity index 74%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/request_handler.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
index dfcaf1e..414116b 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/request_handler.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
@@ -17,15 +17,16 @@
 
 import (
 	"errors"
+
 	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/empty"
-	"github.com/opencord/voltha-lib-go/v2/pkg/adapters"
-	"github.com/opencord/voltha-lib-go/v2/pkg/adapters/adapterif"
-	"github.com/opencord/voltha-lib-go/v2/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
-	ic "github.com/opencord/voltha-protos/v2/go/inter_container"
-	"github.com/opencord/voltha-protos/v2/go/openflow_13"
-	"github.com/opencord/voltha-protos/v2/go/voltha"
+	"github.com/opencord/voltha-lib-go/v3/pkg/adapters"
+	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	"github.com/opencord/voltha-protos/v3/go/openflow_13"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -59,7 +60,7 @@
 
 func (rhp *RequestHandlerProxy) Adopt_device(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -70,23 +71,23 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
 
-	log.Debugw("Adopt_device", log.Fields{"deviceId": device.Id})
+	logger.Debugw("Adopt_device", log.Fields{"deviceId": device.Id})
 
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
@@ -101,7 +102,7 @@
 
 func (rhp *RequestHandlerProxy) Reconcile_device(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -113,17 +114,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -144,7 +145,7 @@
 
 func (rhp *RequestHandlerProxy) Disable_device(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -156,17 +157,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -182,7 +183,7 @@
 
 func (rhp *RequestHandlerProxy) Reenable_device(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -194,17 +195,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -220,7 +221,7 @@
 
 func (rhp *RequestHandlerProxy) Reboot_device(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -232,17 +233,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -263,7 +264,7 @@
 
 func (rhp *RequestHandlerProxy) Delete_device(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -275,17 +276,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				log.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -304,9 +305,9 @@
 }
 
 func (rhp *RequestHandlerProxy) Update_flows_bulk(args []*ic.Argument) (*empty.Empty, error) {
-	log.Debug("Update_flows_bulk")
+	logger.Debug("Update_flows_bulk")
 	if len(args) < 5 {
-		log.Warn("Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -319,32 +320,32 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flows":
 			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
-				log.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
 				return nil, err
 			}
 		case "groups":
 			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
-				log.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flow_metadata":
 			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
-				log.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	log.Debugw("Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
+	logger.Debugw("Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
 	//Invoke the bulk flow update API of the adapter
 	if err := rhp.adapter.Update_flows_bulk(device, flows, groups, flowMetadata); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
@@ -353,9 +354,9 @@
 }
 
 func (rhp *RequestHandlerProxy) Update_flows_incrementally(args []*ic.Argument) (*empty.Empty, error) {
-	log.Debug("Update_flows_incrementally")
+	logger.Debug("Update_flows_incrementally")
 	if len(args) < 5 {
-		log.Warn("Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -368,32 +369,32 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flow_changes":
 			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
-				log.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
 				return nil, err
 			}
 		case "group_changes":
 			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
-				log.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flow_metadata":
 			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
-				log.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	log.Debugw("Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
+	logger.Debugw("Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
 	//Invoke the incremental flow update API of the adapter
 	if err := rhp.adapter.Update_flows_incrementally(device, flows, groups, flowMetadata); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
@@ -402,9 +403,9 @@
 }
 
 func (rhp *RequestHandlerProxy) Update_pm_config(args []*ic.Argument) (*empty.Empty, error) {
-	log.Debug("Update_pm_config")
+	logger.Debug("Update_pm_config")
 	if len(args) < 2 {
-		log.Warn("Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -415,22 +416,22 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "pm_configs":
 			if err := ptypes.UnmarshalAny(arg.Value, pmConfigs); err != nil {
-				log.Warnw("cannot-unmarshal-pm-configs", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-pm-configs", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	log.Debugw("Update_pm_config", log.Fields{"deviceId": device.Id, "pmConfigs": pmConfigs})
+	logger.Debugw("Update_pm_config", log.Fields{"deviceId": device.Id, "pmConfigs": pmConfigs})
 	//Invoke the pm config update API of the adapter
 	if err := rhp.adapter.Update_pm_config(device, pmConfigs); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
@@ -439,9 +440,9 @@
 }
 
 func (rhp *RequestHandlerProxy) Receive_packet_out(args []*ic.Argument) (*empty.Empty, error) {
-	log.Debugw("Receive_packet_out", log.Fields{"args": args})
+	logger.Debugw("Receive_packet_out", log.Fields{"args": args})
 	if len(args) < 3 {
-		log.Warn("Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -453,27 +454,27 @@
 		switch arg.Key {
 		case "deviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
-				log.Warnw("cannot-unmarshal-deviceId", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-deviceId", log.Fields{"error": err})
 				return nil, err
 			}
 		case "outPort":
 			if err := ptypes.UnmarshalAny(arg.Value, egressPort); err != nil {
-				log.Warnw("cannot-unmarshal-egressPort", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-egressPort", log.Fields{"error": err})
 				return nil, err
 			}
 		case "packet":
 			if err := ptypes.UnmarshalAny(arg.Value, packet); err != nil {
-				log.Warnw("cannot-unmarshal-packet", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-packet", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	log.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceId.Val, "outPort": egressPort, "packet": packet})
+	logger.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceId.Val, "outPort": egressPort, "packet": packet})
 	//Invoke the adopt device on the adapter
 	if err := rhp.adapter.Receive_packet_out(deviceId.Val, int(egressPort.Val), packet); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
@@ -491,7 +492,7 @@
 
 func (rhp *RequestHandlerProxy) Get_ofp_device_info(args []*ic.Argument) (*ic.SwitchCapability, error) {
 	if len(args) < 2 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -501,31 +502,31 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
 
-	log.Debugw("Get_ofp_device_info", log.Fields{"deviceId": device.Id})
+	logger.Debugw("Get_ofp_device_info", log.Fields{"deviceId": device.Id})
 
 	var cap *ic.SwitchCapability
 	var err error
 	if cap, err = rhp.adapter.Get_ofp_device_info(device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
-	log.Debugw("Get_ofp_device_info", log.Fields{"cap": cap})
+	logger.Debugw("Get_ofp_device_info", log.Fields{"cap": cap})
 	return cap, nil
 }
 
 func (rhp *RequestHandlerProxy) Get_ofp_port_info(args []*ic.Argument) (*ic.PortCapability, error) {
 	if len(args) < 3 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -536,22 +537,22 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "port_no":
 			if err := ptypes.UnmarshalAny(arg.Value, pNo); err != nil {
-				log.Warnw("cannot-unmarshal-port-no", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-port-no", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	log.Debugw("Get_ofp_port_info", log.Fields{"deviceId": device.Id, "portNo": pNo.Val})
+	logger.Debugw("Get_ofp_port_info", log.Fields{"deviceId": device.Id, "portNo": pNo.Val})
 	var cap *ic.PortCapability
 	var err error
 	if cap, err = rhp.adapter.Get_ofp_port_info(device, pNo.Val); err != nil {
@@ -562,7 +563,7 @@
 
 func (rhp *RequestHandlerProxy) Process_inter_adapter_message(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 2 {
-		log.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -572,18 +573,18 @@
 		switch arg.Key {
 		case "msg":
 			if err := ptypes.UnmarshalAny(arg.Value, iaMsg); err != nil {
-				log.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				log.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
 
-	log.Debugw("Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
+	logger.Debugw("Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
 
 	//Invoke the inter adapter API on the handler
 	if err := rhp.adapter.Process_inter_adapter_message(iaMsg); err != nil {
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/utils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/common/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
similarity index 93%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/iAdapter.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
index 38aac38..3b86ac5 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/adapters/iAdapter.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
@@ -16,9 +16,9 @@
 package adapters
 
 import (
-	ic "github.com/opencord/voltha-protos/v2/go/inter_container"
-	"github.com/opencord/voltha-protos/v2/go/openflow_13"
-	"github.com/opencord/voltha-protos/v2/go/voltha"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	"github.com/opencord/voltha-protos/v3/go/openflow_13"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
 //IAdapter represents the set of APIs a voltha adapter has to support.
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
similarity index 86%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/backend.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
index b2547c2..23ad5a0 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
@@ -20,13 +20,14 @@
 	"context"
 	"errors"
 	"fmt"
-	"github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
 	"strconv"
 	"sync"
 	"time"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
 )
 
 const (
@@ -65,7 +66,7 @@
 
 	address := host + ":" + strconv.Itoa(port)
 	if b.Client, err = b.newClient(address, timeout); err != nil {
-		log.Errorw("failed-to-create-kv-client",
+		logger.Errorw("failed-to-create-kv-client",
 			log.Fields{
 				"type": storeType, "host": host, "port": port,
 				"timeout": timeout, "prefix": pathPrefix,
@@ -99,11 +100,11 @@
 	if b.liveness != nil {
 
 		if b.alive != alive {
-			log.Debug("update-liveness-channel-reason-change")
+			logger.Debug("update-liveness-channel-reason-change")
 			b.liveness <- alive
 			b.lastLivenessTime = time.Now()
 		} else if time.Now().Sub(b.lastLivenessTime) > b.LivenessChannelInterval {
-			log.Debug("update-liveness-channel-reason-interval")
+			logger.Debug("update-liveness-channel-reason-interval")
 			b.liveness <- alive
 			b.lastLivenessTime = time.Now()
 		}
@@ -111,7 +112,7 @@
 
 	// Emit log message only for alive state change
 	if b.alive != alive {
-		log.Debugw("change-kvstore-alive-status", log.Fields{"alive": alive})
+		logger.Debugw("change-kvstore-alive-status", log.Fields{"alive": alive})
 		b.alive = alive
 	}
 }
@@ -120,7 +121,7 @@
 // post on Liveness channel
 func (b *Backend) PerformLivenessCheck(timeout int) bool {
 	alive := b.Client.IsConnectionUp(timeout)
-	log.Debugw("kvstore-liveness-check-result", log.Fields{"alive": alive})
+	logger.Debugw("kvstore-liveness-check-result", log.Fields{"alive": alive})
 
 	b.updateLiveness(alive)
 	return alive
@@ -132,10 +133,10 @@
 // by the service (i.e. rw_core / ro_core) to update readiness status
 // and/or take other actions.
 func (b *Backend) EnableLivenessChannel() chan bool {
-	log.Debug("enable-kvstore-liveness-channel")
+	logger.Debug("enable-kvstore-liveness-channel")
 
 	if b.liveness == nil {
-		log.Debug("create-kvstore-liveness-channel")
+		logger.Debug("create-kvstore-liveness-channel")
 
 		// Channel size of 10 to avoid any possibility of blocking in Load conditions
 		b.liveness = make(chan bool, 10)
@@ -191,7 +192,7 @@
 	defer b.Unlock()
 
 	formattedPath := b.makePath(key)
-	log.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath})
+	logger.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath})
 
 	pair, err := b.Client.List(formattedPath, b.Timeout)
 
@@ -206,7 +207,7 @@
 	defer b.Unlock()
 
 	formattedPath := b.makePath(key)
-	log.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath})
+	logger.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath})
 
 	pair, err := b.Client.Get(formattedPath, b.Timeout)
 
@@ -221,7 +222,7 @@
 	defer b.Unlock()
 
 	formattedPath := b.makePath(key)
-	log.Debugw("putting-key", log.Fields{"key": key, "value": string(value.([]byte)), "path": formattedPath})
+	logger.Debugw("putting-key", log.Fields{"key": key, "value": string(value.([]byte)), "path": formattedPath})
 
 	err := b.Client.Put(formattedPath, value, b.Timeout)
 
@@ -236,7 +237,7 @@
 	defer b.Unlock()
 
 	formattedPath := b.makePath(key)
-	log.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath})
+	logger.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath})
 
 	err := b.Client.Delete(formattedPath, b.Timeout)
 
@@ -251,7 +252,7 @@
 	defer b.Unlock()
 
 	formattedPath := b.makePath(key)
-	log.Debugw("creating-key-watch", log.Fields{"key": key, "path": formattedPath})
+	logger.Debugw("creating-key-watch", log.Fields{"key": key, "path": formattedPath})
 
 	return b.Client.Watch(formattedPath)
 }
@@ -262,7 +263,7 @@
 	defer b.Unlock()
 
 	formattedPath := b.makePath(key)
-	log.Debugw("deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+	logger.Debugw("deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
 
 	b.Client.CloseWatch(formattedPath, ch)
 }
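For reference, a minimal standalone sketch of the liveness-reporting pattern Backend uses above: a buffered channel that only receives a value when the KV-store alive state flips, or when the last report is older than the reporting interval. Type and function names here are illustrative, not the library API.

package main

import (
	"fmt"
	"time"
)

// kvLiveness mirrors the Backend pattern above: post on the channel when the
// alive state changes, or when the last post is older than the interval.
type kvLiveness struct {
	ch       chan bool
	alive    bool
	lastPost time.Time
	interval time.Duration
}

func newKVLiveness(interval time.Duration) *kvLiveness {
	// Buffered (size 10 in the library) so a slow reader never blocks the KV path.
	return &kvLiveness{ch: make(chan bool, 10), interval: interval}
}

func (l *kvLiveness) report(alive bool) {
	if l.alive != alive {
		l.ch <- alive // state change: always post
		l.lastPost = time.Now()
		l.alive = alive
	} else if time.Since(l.lastPost) > l.interval {
		l.ch <- alive // periodic heartbeat while the state is unchanged
		l.lastPost = time.Now()
	}
}

func main() {
	l := newKVLiveness(2 * time.Second)
	go func() {
		for alive := range l.ch {
			fmt.Println("kvstore alive:", alive)
		}
	}()
	l.report(true)  // posted: state change
	l.report(true)  // suppressed: no change, interval not expired
	l.report(false) // posted: state change
	time.Sleep(100 * time.Millisecond)
}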
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
similarity index 70%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
index 0851ede..a5a79ae 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2020-present Open Networking Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -16,18 +16,19 @@
 package db
 
 import (
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
 const (
-	logLevel = log.FatalLevel
+	logLevel = log.ErrorLevel
 )
 
-// Unit test initialization. This init() function handles all unit tests in
-// the current directory.
+var logger log.Logger
+
 func init() {
 	// Setup this package so that it's log level can be modified at run time
-	_, err := log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "db"})
+	var err error
+	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "db"})
 	if err != nil {
 		panic(err)
 	}
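The pattern introduced here replaces calls on the global log helpers with a per-package logger handle, so each package's level can be tuned independently at run time. A short sketch of how another package would adopt the same convention (the package name is illustrative):

package examplepkg

import (
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

// Per-package logger, following the convention introduced above: the package
// registers itself once and then logs through its own handle so its level can
// be changed independently at run time.
var logger log.Logger

func init() {
	var err error
	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "examplepkg"})
	if err != nil {
		panic(err)
	}
}

func doSomething(id string) {
	logger.Debugw("doing-something", log.Fields{"id": id})
}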
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
similarity index 95%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore/client.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
index 97fbec9..088593a 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
@@ -15,10 +15,6 @@
  */
 package kvstore
 
-import (
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
-)
-
 const (
 	// Default timeout in seconds when making a kvstore request
 	defaultKVGetTimeout = 5
@@ -43,10 +39,6 @@
 	Lease   int64
 }
 
-func init() {
-	log.AddPackage(log.JSON, log.WarnLevel, nil)
-}
-
 // NewKVPair creates a new KVPair object
 func NewKVPair(key string, value interface{}, session string, lease int64, version int64) *KVPair {
 	kv := new(KVPair)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
similarity index 69%
copy from vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go
copy to vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
index 0851ede..2d2a6a6 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2020-present Open Networking Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,21 +13,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package db
+package kvstore
 
 import (
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
 const (
-	logLevel = log.FatalLevel
+	logLevel = log.ErrorLevel
 )
 
-// Unit test initialization. This init() function handles all unit tests in
-// the current directory.
+var logger log.Logger
+
 func init() {
 	// Setup this package so that it's log level can be modified at run time
-	_, err := log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "db"})
+	var err error
+	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "kvstore"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore/consulclient.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
similarity index 86%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore/consulclient.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
index a94de4d..e391293 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore/consulclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
@@ -19,7 +19,7 @@
 	"bytes"
 	"context"
 	"errors"
-	log "github.com/opencord/voltha-lib-go/v2/pkg/log"
+	log "github.com/opencord/voltha-lib-go/v3/pkg/log"
 	"sync"
 	"time"
 	//log "ciena.com/coordinator/common"
@@ -53,7 +53,7 @@
 	config.WaitTime = duration
 	consul, err := consulapi.NewClient(config)
 	if err != nil {
-		log.Error(err)
+		logger.Error(err)
 		return nil, err
 	}
 
@@ -65,7 +65,7 @@
 
 // IsConnectionUp returns whether the connection to the Consul KV store is up
 func (c *ConsulClient) IsConnectionUp(timeout int) bool {
-	log.Error("Unimplemented function")
+	logger.Error("Unimplemented function")
 	return false
 }
 
@@ -80,7 +80,7 @@
 	// For now we ignore meta data
 	kvps, _, err := kv.List(key, &queryOptions)
 	if err != nil {
-		log.Error(err)
+		logger.Error(err)
 		return nil, err
 	}
 	m := make(map[string]*KVPair)
@@ -102,7 +102,7 @@
 	// For now we ignore meta data
 	kvp, _, err := kv.Get(key, &queryOptions)
 	if err != nil {
-		log.Error(err)
+		logger.Error(err)
 		return nil, err
 	}
 	if kvp != nil {
@@ -121,7 +121,7 @@
 	var val []byte
 	var er error
 	if val, er = ToByte(value); er != nil {
-		log.Error(er)
+		logger.Error(er)
 		return er
 	}
 
@@ -133,7 +133,7 @@
 	defer c.writeLock.Unlock()
 	_, err := kv.Put(&kvp, &writeOptions)
 	if err != nil {
-		log.Error(err)
+		logger.Error(err)
 		return err
 	}
 	return nil
@@ -148,7 +148,7 @@
 	defer c.writeLock.Unlock()
 	_, err := kv.Delete(key, &writeOptions)
 	if err != nil {
-		log.Error(err)
+		logger.Error(err)
 		return err
 	}
 	return nil
@@ -156,11 +156,11 @@
 
 func (c *ConsulClient) deleteSession() {
 	if c.sessionID != "" {
-		log.Debug("cleaning-up-session")
+		logger.Debug("cleaning-up-session")
 		session := c.consul.Session()
 		_, err := session.Destroy(c.sessionID, nil)
 		if err != nil {
-			log.Errorw("error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
+			logger.Errorw("error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
 		}
 	}
 	c.sessionID = ""
@@ -177,17 +177,17 @@
 	for {
 		id, meta, err := session.Create(entry, nil)
 		if err != nil {
-			log.Errorw("create-session-error", log.Fields{"error": err})
+			logger.Errorw("create-session-error", log.Fields{"error": err})
 			if retries == 0 {
 				return nil, "", err
 			}
 		} else if meta.RequestTime == 0 {
-			log.Errorw("create-session-bad-meta-data", log.Fields{"meta-data": meta})
+			logger.Errorw("create-session-bad-meta-data", log.Fields{"meta-data": meta})
 			if retries == 0 {
 				return nil, "", errors.New("bad-meta-data")
 			}
 		} else if id == "" {
-			log.Error("create-session-nil-id")
+			logger.Error("create-session-nil-id")
 			if retries == 0 {
 				return nil, "", errors.New("ID-nil")
 			}
@@ -198,7 +198,7 @@
 		if retries > 0 {
 			retries--
 		}
-		log.Debug("retrying-session-create-after-a-second-delay")
+		logger.Debug("retrying-session-create-after-a-second-delay")
 		time.Sleep(time.Duration(1) * time.Second)
 	}
 }
@@ -225,7 +225,7 @@
 	var val []byte
 	var er error
 	if val, er = ToByte(value); er != nil {
-		log.Error(er)
+		logger.Error(er)
 		return nil, er
 	}
 
@@ -238,17 +238,17 @@
 	reservationSuccessful := false
 	defer func() {
 		if !reservationSuccessful {
-			log.Debug("deleting-session")
+			logger.Debug("deleting-session")
 			c.deleteSession()
 		}
 	}()
 
 	session, sessionID, err := c.createSession(ttl, -1)
 	if err != nil {
-		log.Errorw("no-session-created", log.Fields{"error": err})
+		logger.Errorw("no-session-created", log.Fields{"error": err})
 		return "", errors.New("no-session-created")
 	}
-	log.Debugw("session-created", log.Fields{"session-id": sessionID})
+	logger.Debugw("session-created", log.Fields{"session-id": sessionID})
 	c.sessionID = sessionID
 	c.session = session
 
@@ -257,11 +257,11 @@
 	kvp := consulapi.KVPair{Key: key, Value: val, Session: c.sessionID}
 	result, _, err := kv.Acquire(&kvp, nil)
 	if err != nil {
-		log.Errorw("error-acquiring-keys", log.Fields{"error": err})
+		logger.Errorw("error-acquiring-keys", log.Fields{"error": err})
 		return nil, err
 	}
 
-	log.Debugw("key-acquired", log.Fields{"key": key, "status": result})
+	logger.Debugw("key-acquired", log.Fields{"key": key, "status": result})
 
 	// Irrespective whether we were successful in acquiring the key, let's read it back and see if it's us.
 	m, err := c.Get(key, defaultKVGetTimeout)
@@ -269,7 +269,7 @@
 		return nil, err
 	}
 	if m != nil {
-		log.Debugw("response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
+		logger.Debugw("response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
 		if m.Key == key && isEqual(m.Value, value) {
 			// My reservation is successful - register it.  For now, support is only for 1 reservation per key
 			// per session.
@@ -299,11 +299,11 @@
 		kvp = consulapi.KVPair{Key: key, Value: value.([]byte), Session: c.sessionID}
 		result, _, err = kv.Release(&kvp, nil)
 		if err != nil {
-			log.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
+			logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
 			return err
 		}
 		if !result {
-			log.Errorw("cannot-release-reservation", log.Fields{"key": key})
+			logger.Errorw("cannot-release-reservation", log.Fields{"key": key})
 		}
 		delete(c.keyReservations, key)
 	}
@@ -390,14 +390,14 @@
 	c.writeLock.Lock()
 	defer c.writeLock.Unlock()
 	if watchedChannelsContexts, ok = c.watchedChannelsContext[key]; !ok {
-		log.Errorw("key-has-no-watched-context-or-channel", log.Fields{"key": key})
+		logger.Errorw("key-has-no-watched-context-or-channel", log.Fields{"key": key})
 		return
 	}
 	// Look for the channels
 	var pos = -1
 	for i, chCtxMap := range watchedChannelsContexts {
 		if chCtxMap.channel == ch {
-			log.Debug("channel-found")
+			logger.Debug("channel-found")
 			chCtxMap.cancel()
 			//close the channel
 			close(ch)
@@ -409,7 +409,7 @@
 	if pos >= 0 {
 		c.watchedChannelsContext[key] = append(c.watchedChannelsContext[key][:pos], c.watchedChannelsContext[key][pos+1:]...)
 	}
-	log.Debugw("watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
+	logger.Debugw("watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
 }
 
 func (c *ConsulClient) isKVEqual(kv1 *consulapi.KVPair, kv2 *consulapi.KVPair) bool {
@@ -430,7 +430,7 @@
 }
 
 func (c *ConsulClient) listenForKeyChange(watchContext context.Context, key string, ch chan *Event) {
-	log.Debugw("start-watching-channel", log.Fields{"key": key, "channel": ch})
+	logger.Debugw("start-watching-channel", log.Fields{"key": key, "channel": ch})
 
 	defer c.CloseWatch(key, ch)
 	duration := GetDuration(defaultKVGetTimeout)
@@ -441,7 +441,7 @@
 	// Get the existing value, if any
 	previousKVPair, meta, err := kv.Get(key, &queryOptions)
 	if err != nil {
-		log.Debug(err)
+		logger.Debug(err)
 	}
 	lastIndex := meta.LastIndex
 
@@ -456,30 +456,30 @@
 		pair, meta, err = kv.Get(key, waitOptions)
 		select {
 		case <-watchContext.Done():
-			log.Debug("done-event-received-exiting")
+			logger.Debug("done-event-received-exiting")
 			return
 		default:
 			if err != nil {
-				log.Warnw("error-from-watch", log.Fields{"error": err})
+				logger.Warnw("error-from-watch", log.Fields{"error": err})
 				ch <- NewEvent(CONNECTIONDOWN, key, []byte(""), -1)
 			} else {
-				log.Debugw("index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
+				logger.Debugw("index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
 			}
 		}
 		if err != nil {
-			log.Debug(err)
+			logger.Debug(err)
 			// On error, block for 10 milliseconds to prevent endless loop
 			time.Sleep(10 * time.Millisecond)
 		} else if meta.LastIndex <= lastIndex {
-			log.Info("no-index-change-or-negative")
+			logger.Info("no-index-change-or-negative")
 		} else {
-			log.Debugw("update-received", log.Fields{"pair": pair})
+			logger.Debugw("update-received", log.Fields{"pair": pair})
 			if pair == nil {
 				ch <- NewEvent(DELETE, key, []byte(""), -1)
 			} else if !c.isKVEqual(pair, previousKVPair) {
 				// Push the change onto the channel if the data has changed
 				// For now just assume it's a PUT change
-				log.Debugw("pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
+				logger.Debugw("pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
 				ch <- NewEvent(PUT, pair.Key, pair.Value, -1)
 			}
 			previousKVPair = pair
@@ -500,7 +500,7 @@
 
 	// Clear the sessionID
 	if _, err := c.consul.Session().Destroy(c.sessionID, &writeOptions); err != nil {
-		log.Errorw("error-closing-client", log.Fields{"error": err})
+		logger.Errorw("error-closing-client", log.Fields{"error": err})
 	}
 }
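For reference, a minimal sketch of the session-based reservation that ConsulClient implements above, written directly against github.com/hashicorp/consul/api: create a TTL session, then acquire the key under it. The Consul address and key are illustrative.

package main

import (
	"fmt"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	config := consulapi.DefaultConfig()
	config.Address = "localhost:8500" // illustrative Consul address
	client, err := consulapi.NewClient(config)
	if err != nil {
		panic(err)
	}

	// Create a session with a TTL; the reservation lapses if it is not renewed.
	sessionID, _, err := client.Session().Create(&consulapi.SessionEntry{TTL: "10s"}, nil)
	if err != nil {
		panic(err)
	}
	defer client.Session().Destroy(sessionID, nil)

	// Acquire ties the key to the session; only one holder succeeds at a time.
	acquired, _, err := client.KV().Acquire(&consulapi.KVPair{
		Key:     "service/voltha/core_1", // illustrative key
		Value:   []byte("owner-1"),
		Session: sessionID,
	}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("reservation acquired:", acquired)
}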
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
similarity index 91%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore/etcdclient.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
index 3ae767c..7096748 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore/etcdclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
@@ -19,7 +19,7 @@
 	"context"
 	"errors"
 	"fmt"
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	v3Client "go.etcd.io/etcd/clientv3"
 	v3Concurrency "go.etcd.io/etcd/clientv3/concurrency"
 	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
@@ -50,7 +50,7 @@
 		DialTimeout: duration,
 	})
 	if err != nil {
-		log.Error(err)
+		logger.Error(err)
 		return nil, err
 	}
 
@@ -82,7 +82,7 @@
 	resp, err := c.ectdAPI.Get(ctx, key, v3Client.WithPrefix())
 	cancel()
 	if err != nil {
-		log.Error(err)
+		logger.Error(err)
 		return nil, err
 	}
 	m := make(map[string]*KVPair)
@@ -102,7 +102,7 @@
 	resp, err := c.ectdAPI.Get(ctx, key)
 	cancel()
 	if err != nil {
-		log.Error(err)
+		logger.Error(err)
 		return nil, err
 	}
 	for _, ev := range resp.Kvs {
@@ -143,13 +143,13 @@
 	if err != nil {
 		switch err {
 		case context.Canceled:
-			log.Warnw("context-cancelled", log.Fields{"error": err})
+			logger.Warnw("context-cancelled", log.Fields{"error": err})
 		case context.DeadlineExceeded:
-			log.Warnw("context-deadline-exceeded", log.Fields{"error": err})
+			logger.Warnw("context-deadline-exceeded", log.Fields{"error": err})
 		case v3rpcTypes.ErrEmptyKey:
-			log.Warnw("etcd-client-error", log.Fields{"error": err})
+			logger.Warnw("etcd-client-error", log.Fields{"error": err})
 		default:
-			log.Warnw("bad-endpoints", log.Fields{"error": err})
+			logger.Warnw("bad-endpoints", log.Fields{"error": err})
 		}
 		return err
 	}
@@ -171,10 +171,10 @@
 
 	// delete the key
 	if _, err := c.ectdAPI.Delete(ctx, key); err != nil {
-		log.Errorw("failed-to-delete-key", log.Fields{"key": key, "error": err})
+		logger.Errorw("failed-to-delete-key", log.Fields{"key": key, "error": err})
 		return err
 	}
-	log.Debugw("key(s)-deleted", log.Fields{"key": key})
+	logger.Debugw("key(s)-deleted", log.Fields{"key": key})
 	return nil
 }
 
@@ -199,7 +199,7 @@
 
 	resp, err := c.ectdAPI.Grant(ctx, ttl)
 	if err != nil {
-		log.Error(err)
+		logger.Error(err)
 		return nil, err
 	}
 	// Register the lease id
@@ -212,7 +212,7 @@
 	defer func() {
 		if !reservationSuccessful {
 			if err = c.ReleaseReservation(key); err != nil {
-				log.Error("cannot-release-lease")
+				logger.Error("cannot-release-lease")
 			}
 		}
 	}()
@@ -270,7 +270,7 @@
 	for key, leaseID := range c.keyReservations {
 		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
 		if err != nil {
-			log.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
+			logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
 			return err
 		}
 		delete(c.keyReservations, key)
@@ -281,7 +281,7 @@
 // ReleaseReservation releases reservation for a specific key.
 func (c *EtcdClient) ReleaseReservation(key string) error {
 	// Get the leaseid using the key
-	log.Debugw("Release-reservation", log.Fields{"key": key})
+	logger.Debugw("Release-reservation", log.Fields{"key": key})
 	var ok bool
 	var leaseID *v3Client.LeaseID
 	c.writeLock.Lock()
@@ -296,7 +296,7 @@
 	if leaseID != nil {
 		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
 		if err != nil {
-			log.Error(err)
+			logger.Error(err)
 			return err
 		}
 		delete(c.keyReservations, key)
@@ -322,7 +322,7 @@
 	if leaseID != nil {
 		_, err := c.ectdAPI.KeepAliveOnce(ctx, *leaseID)
 		if err != nil {
-			log.Errorw("lease-may-have-expired", log.Fields{"error": err})
+			logger.Errorw("lease-may-have-expired", log.Fields{"error": err})
 			return err
 		}
 	} else {
@@ -349,7 +349,7 @@
 
 	// Changing the log field (from channelMaps) as the underlying logger cannot format the map of channels into a
 	// json format.
-	log.Debugw("watched-channels", log.Fields{"len": len(channelMaps)})
+	logger.Debugw("watched-channels", log.Fields{"len": len(channelMaps)})
 	// Launch a go routine to listen for updates
 	go c.listenForKeyChange(channel, ch, cancel)
 
@@ -406,17 +406,17 @@
 	defer c.writeLock.Unlock()
 
 	if watchedChannels, ok = c.getChannelMaps(key); !ok {
-		log.Warnw("key-has-no-watched-channels", log.Fields{"key": key})
+		logger.Warnw("key-has-no-watched-channels", log.Fields{"key": key})
 		return
 	}
 	// Look for the channels
 	var pos = -1
 	for i, chMap := range watchedChannels {
 		if t, ok := chMap[ch]; ok {
-			log.Debug("channel-found")
+			logger.Debug("channel-found")
 			// Close the etcd watcher before the client channel.  This should close the etcd channel as well
 			if err := t.Close(); err != nil {
-				log.Errorw("watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+				logger.Errorw("watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
 			}
 			pos = i
 			break
@@ -428,11 +428,11 @@
 	if pos >= 0 {
 		channelMaps = c.removeChannelMap(key, pos)
 	}
-	log.Infow("watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+	logger.Infow("watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
 }
 
 func (c *EtcdClient) listenForKeyChange(channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
-	log.Debug("start-listening-on-channel ...")
+	logger.Debug("start-listening-on-channel ...")
 	defer cancel()
 	defer close(ch)
 	for resp := range channel {
@@ -440,7 +440,7 @@
 			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
 		}
 	}
-	log.Debug("stop-listening-on-channel ...")
+	logger.Debug("stop-listening-on-channel ...")
 }
 
 func getEventType(event *v3Client.Event) int {
@@ -458,7 +458,7 @@
 	c.writeLock.Lock()
 	defer c.writeLock.Unlock()
 	if err := c.ectdAPI.Close(); err != nil {
-		log.Errorw("error-closing-client", log.Fields{"error": err})
+		logger.Errorw("error-closing-client", log.Fields{"error": err})
 	}
 }
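For reference, a minimal sketch of the lease-backed reservation EtcdClient implements above, written directly against go.etcd.io/etcd/clientv3: grant a lease, bind the key to it, renew once, then revoke. Endpoint and key names are illustrative.

package main

import (
	"context"
	"fmt"
	"time"

	v3 "go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := v3.New(v3.Config{
		Endpoints:   []string{"localhost:2379"}, // illustrative endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Grant a lease with a TTL; the key disappears if the lease expires.
	lease, err := cli.Grant(ctx, 10)
	if err != nil {
		panic(err)
	}

	// Reserve the key by attaching it to the lease.
	if _, err := cli.Put(ctx, "service/voltha/core_1", "owner-1", v3.WithLease(lease.ID)); err != nil {
		panic(err)
	}

	// Renew once, as the reservation-renewal path above does.
	if _, err := cli.KeepAliveOnce(ctx, lease.ID); err != nil {
		fmt.Println("lease may have expired:", err)
	}

	// Release the reservation by revoking the lease.
	if _, err := cli.Revoke(ctx, lease.ID); err != nil {
		panic(err)
	}
}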
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore/kvutils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/kvutils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore/kvutils.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/kvutils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go
similarity index 69%
copy from vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go
copy to vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go
index 0851ede..b4fe7ec 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2020-present Open Networking Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,21 +13,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package db
+package flows
 
 import (
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
 const (
-	logLevel = log.FatalLevel
+	logLevel = log.ErrorLevel
 )
 
-// Unit test initialization. This init() function handles all unit tests in
-// the current directory.
+var logger log.Logger
+
 func init() {
 	// Setup this package so that it's log level can be modified at run time
-	_, err := log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "db"})
+	var err error
+	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "flowsUtils"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go
similarity index 97%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/flows/flow_utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go
index 02a4b0b..b9981e6 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/flows/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go
@@ -21,8 +21,8 @@
 	"fmt"
 	"github.com/cevaris/ordered_map"
 	"github.com/gogo/protobuf/proto"
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
-	ofp "github.com/opencord/voltha-protos/v2/go/openflow_13"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ofp "github.com/opencord/voltha-protos/v3/go/openflow_13"
 	"math/big"
 	"strings"
 )
@@ -510,7 +510,7 @@
 			return uint32(field.GetTableMetadata() & 0xFFFFFFFF)
 		}
 	}
-	log.Debug("No-metadata-present")
+	logger.Debug("No-metadata-present")
 	return 0
 }
 
@@ -523,7 +523,7 @@
 			return field.GetTableMetadata()
 		}
 	}
-	log.Debug("No-metadata-present")
+	logger.Debug("No-metadata-present")
 	return 0
 }
 
@@ -538,7 +538,7 @@
 			}
 		}
 	}
-	log.Debugw("No-write-metadata-present", log.Fields{"flow": flow})
+	logger.Debugw("No-write-metadata-present", log.Fields{"flow": flow})
 	return 0
 }
 
@@ -552,10 +552,10 @@
 	   This is set in the ONOS OltPipeline as a write metadata instruction
 	*/
 	var tpId uint16 = 0
-	log.Debugw("Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
+	logger.Debugw("Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
 	if metadata != 0 {
 		tpId = uint16((metadata >> 32) & 0xFFFF)
-		log.Debugw("Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
+		logger.Debugw("Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
 	}
 	return tpId
 }
@@ -570,10 +570,10 @@
 	*/
 	var uniPort uint32 = 0
 	md := GetMetadataFromWriteMetadataAction(flow)
-	log.Debugw("Metadata found for egress/uni port ", log.Fields{"metadata": md})
+	logger.Debugw("Metadata found for egress/uni port ", log.Fields{"metadata": md})
 	if md != 0 {
 		uniPort = uint32(md & 0xFFFFFFFF)
-		log.Debugw("Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
+		logger.Debugw("Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
 	}
 	return uniPort
 
@@ -591,7 +591,7 @@
 	md := GetMetadataFromWriteMetadataAction(flow)
 	if md != 0 {
 		innerTag = uint16((md >> 48) & 0xFFFF)
-		log.Debugw("Found  CVLAN from write metadate action", log.Fields{"c_vlan": innerTag})
+		logger.Debugw("Found  CVLAN from write metadate action", log.Fields{"c_vlan": innerTag})
 	}
 	return innerTag
 }
@@ -605,7 +605,7 @@
 		return 0
 	}
 	if md <= 0xffffffff {
-		log.Debugw("onos-upgrade-suggested", log.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
+		logger.Debugw("onos-upgrade-suggested", log.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
 		return md
 	}
 	return (md >> 32) & 0xffffffff
@@ -730,7 +730,7 @@
 	meter := &ofp.OfpMeterEntry{Config: &ofp.OfpMeterConfig{},
 		Stats: &ofp.OfpMeterStats{BandStats: bandStats}}
 	if meterMod == nil {
-		log.Error("Invalid meter mod command")
+		logger.Error("Invalid meter mod command")
 		return meter
 	}
 	// config init
@@ -752,7 +752,7 @@
 		bandStats = append(bandStats, band)
 	}
 	meter.Stats.BandStats = bandStats
-	log.Debugw("Allocated meter entry", log.Fields{"meter": *meter})
+	logger.Debugw("Allocated meter entry", log.Fields{"meter": *meter})
 	return meter
 
 }
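For reference, a small self-contained sketch of the 64-bit write-metadata layout the helpers above decode (inner C-VLAN in the top 16 bits, tech-profile ID in the next 16, egress/UNI port in the low 32 bits). The sample value is made up.

package main

import "fmt"

// decodeWriteMetadata mirrors the bit operations used by the flow utilities above:
//   bits 63..48: inner (C-)VLAN tag
//   bits 47..32: tech-profile ID
//   bits 31..0 : egress / UNI port
func decodeWriteMetadata(md uint64) (cVlan uint16, tpID uint16, uniPort uint32) {
	cVlan = uint16((md >> 48) & 0xFFFF)
	tpID = uint16((md >> 32) & 0xFFFF)
	uniPort = uint32(md & 0xFFFFFFFF)
	return
}

func main() {
	// Example value only: C-VLAN 101, tech profile 64, UNI port 16.
	md := uint64(101)<<48 | uint64(64)<<32 | uint64(16)
	cVlan, tpID, uniPort := decodeWriteMetadata(md)
	fmt.Printf("c_vlan=%d tp_id=%d uni_port=%d\n", cVlan, tpID, uniPort)
}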
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go
similarity index 96%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/client.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go
index bda7ed9..6289043 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go
@@ -18,7 +18,7 @@
 import (
 	"time"
 
-	ca "github.com/opencord/voltha-protos/v2/go/inter_container"
+	ca "github.com/opencord/voltha-protos/v3/go/inter_container"
 )
 
 const (
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
similarity index 69%
copy from vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go
copy to vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
index 0851ede..cb6acb2 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/db/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
@@ -1,5 +1,5 @@
 /*
- * Copyright 2019-present Open Networking Foundation
+ * Copyright 2020-present Open Networking Foundation
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -13,21 +13,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package db
+package kafka
 
 import (
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
 const (
-	logLevel = log.FatalLevel
+	logLevel = log.ErrorLevel
 )
 
-// Unit test initialization. This init() function handles all unit tests in
-// the current directory.
+var logger log.Logger
+
 func init() {
 	// Setup this package so that it's log level can be modified at run time
-	_, err := log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "db"})
+	var err error
+	logger, err = log.AddPackage(log.JSON, logLevel, log.Fields{"pkg": "kafka"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
similarity index 86%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/kafka_inter_container_library.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
index 4e04b30..042e121 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/kafka_inter_container_library.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
@@ -19,22 +19,18 @@
 	"context"
 	"errors"
 	"fmt"
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
-	"github.com/golang/protobuf/ptypes/any"
-	"github.com/google/uuid"
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
-	ic "github.com/opencord/voltha-protos/v2/go/inter_container"
 	"reflect"
 	"strings"
 	"sync"
 	"time"
-)
 
-// Initialize the logger - gets the default until the main function setup the logger
-func init() {
-	log.AddPackage(log.JSON, log.DebugLevel, nil)
-}
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+)
 
 const (
 	DefaultMaxRetries     = 3
@@ -148,11 +144,11 @@
 }
 
 func (kp *InterContainerProxy) Start() error {
-	log.Info("Starting-Proxy")
+	logger.Info("Starting-Proxy")
 
 	// Kafka MsgClient should already have been created.  If not, output fatal error
 	if kp.kafkaClient == nil {
-		log.Fatal("kafka-client-not-set")
+		logger.Fatal("kafka-client-not-set")
 	}
 
 	// Create the Done channel
@@ -160,7 +156,7 @@
 
 	// Start the kafka client
 	if err := kp.kafkaClient.Start(); err != nil {
-		log.Errorw("Cannot-create-kafka-proxy", log.Fields{"error": err})
+		logger.Errorw("Cannot-create-kafka-proxy", log.Fields{"error": err})
 		return err
 	}
 
@@ -177,7 +173,7 @@
 }
 
 func (kp *InterContainerProxy) Stop() {
-	log.Info("stopping-intercontainer-proxy")
+	logger.Info("stopping-intercontainer-proxy")
 	kp.doneCh <- 1
 	// TODO : Perform cleanup
 	kp.kafkaClient.Stop()
@@ -188,10 +184,10 @@
 
 // DeviceDiscovered publish the discovered device onto the kafka messaging bus
 func (kp *InterContainerProxy) DeviceDiscovered(deviceId string, deviceType string, parentId string, publisher string) error {
-	log.Debugw("sending-device-discovery-msg", log.Fields{"deviceId": deviceId})
+	logger.Debugw("sending-device-discovery-msg", log.Fields{"deviceId": deviceId})
 	//	Simple validation
 	if deviceId == "" || deviceType == "" {
-		log.Errorw("invalid-parameters", log.Fields{"id": deviceId, "type": deviceType})
+		logger.Errorw("invalid-parameters", log.Fields{"id": deviceId, "type": deviceType})
 		return errors.New("invalid-parameters")
 	}
 	//	Create the device discovery message
@@ -212,7 +208,7 @@
 	var marshalledData *any.Any
 	var err error
 	if marshalledData, err = ptypes.MarshalAny(body); err != nil {
-		log.Errorw("cannot-marshal-request", log.Fields{"error": err})
+		logger.Errorw("cannot-marshal-request", log.Fields{"error": err})
 		return err
 	}
 	msg := &ic.InterContainerMessage{
@@ -222,7 +218,7 @@
 
 	// Send the message
 	if err := kp.kafkaClient.Send(msg, kp.deviceDiscoveryTopic); err != nil {
-		log.Errorw("cannot-send-device-discovery-message", log.Fields{"error": err})
+		logger.Errorw("cannot-send-device-discovery-message", log.Fields{"error": err})
 		return err
 	}
 	return nil
@@ -242,7 +238,7 @@
 	// Encode the request
 	protoRequest, err := encodeRequest(rpc, toTopic, responseTopic, key, kvArgs...)
 	if err != nil {
-		log.Warnw("cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+		logger.Warnw("cannot-format-request", log.Fields{"rpc": rpc, "error": err})
 		return false, nil
 	}
 
@@ -251,7 +247,7 @@
 	if waitForResponse {
 		var err error
 		if ch, err = kp.subscribeForResponse(*responseTopic, protoRequest.Header.Id); err != nil {
-			log.Errorw("failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+			logger.Errorw("failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
 		}
 	}
 
@@ -259,7 +255,7 @@
 	// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
 	// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
 	//key := GetDeviceIdFromTopic(*toTopic)
-	log.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+	logger.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
 	go kp.kafkaClient.Send(protoRequest, toTopic, key)
 
 	if waitForResponse {
@@ -279,7 +275,7 @@
 		select {
 		case msg, ok := <-ch:
 			if !ok {
-				log.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				logger.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
 				protoError := &ic.Error{Reason: "channel-closed"}
 				var marshalledArg *any.Any
 				if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
@@ -287,15 +283,15 @@
 				}
 				return false, marshalledArg
 			}
-			log.Debugw("received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			logger.Debugw("received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
 			var responseBody *ic.InterContainerResponseBody
 			var err error
 			if responseBody, err = decodeResponse(msg); err != nil {
-				log.Errorw("decode-response-error", log.Fields{"error": err})
+				logger.Errorw("decode-response-error", log.Fields{"error": err})
 			}
 			return responseBody.Success, responseBody.Result
 		case <-ctx.Done():
-			log.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			logger.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
 			//	 pack the error as proto any type
 			protoError := &ic.Error{Reason: ctx.Err().Error()}
 			var marshalledArg *any.Any
@@ -304,7 +300,7 @@
 			}
 			return false, marshalledArg
 		case <-childCtx.Done():
-			log.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
+			logger.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
 			//	 pack the error as proto any type
 			protoError := &ic.Error{Reason: childCtx.Err().Error()}
 			var marshalledArg *any.Any
@@ -313,7 +309,7 @@
 			}
 			return false, marshalledArg
 		case <-kp.doneCh:
-			log.Infow("received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+			logger.Infow("received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
 			return true, nil
 		}
 	}
@@ -329,7 +325,7 @@
 	var err error
 	if ch, err = kp.kafkaClient.Subscribe(&topic); err != nil {
 		//if ch, err = kp.Subscribe(topic); err != nil {
-		log.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+		logger.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
 		return err
 	}
 
@@ -348,7 +344,7 @@
 	var ch <-chan *ic.InterContainerMessage
 	var err error
 	if ch, err = kp.kafkaClient.Subscribe(&topic, &KVArg{Key: Offset, Value: initialOffset}); err != nil {
-		log.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+		logger.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
 		return err
 	}
 	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: kp.defaultRequestHandlerInterface, ch: ch})
@@ -387,7 +383,7 @@
 		// Unsubscribe to this topic first - this will close the subscribed channel
 		var err error
 		if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
-			log.Errorw("unsubscribing-error", log.Fields{"topic": topic})
+			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic})
 		}
 		delete(kp.topicToResponseChannelMap, topic)
 		return err
@@ -403,7 +399,7 @@
 	for topic, _ := range kp.topicToResponseChannelMap {
 		// Unsubscribe to this topic first - this will close the subscribed channel
 		if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
-			log.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
 		}
 		delete(kp.topicToResponseChannelMap, topic)
 	}
@@ -438,7 +434,7 @@
 	for topic, _ := range kp.topicToRequestHandlerChannelMap {
 		// Close the kafka client client first by unsubscribing to this topic
 		if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
-			log.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
 		}
 		delete(kp.topicToRequestHandlerChannelMap, topic)
 	}
@@ -486,10 +482,10 @@
 func (kp *InterContainerProxy) DeleteTopic(topic Topic) error {
 	// If we have any consumers on that topic we need to close them
 	if err := kp.deleteFromTopicResponseChannelMap(topic.Name); err != nil {
-		log.Errorw("delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
+		logger.Errorw("delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
 	}
 	if err := kp.deleteFromTopicRequestHandlerChannelMap(topic.Name); err != nil {
-		log.Errorw("delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
+		logger.Errorw("delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
 	}
 	kp.deleteTopicTransactionIdToChannelMap(topic.Name)
 
@@ -503,7 +499,7 @@
 	}
 	protoValue, ok := returnedVal.(proto.Message)
 	if !ok {
-		log.Warnw("response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
+		logger.Warnw("response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
 		err := errors.New("response-value-not-proto-message")
 		return nil, err
 	}
@@ -512,7 +508,7 @@
 	var marshalledReturnedVal *any.Any
 	var err error
 	if marshalledReturnedVal, err = ptypes.MarshalAny(protoValue); err != nil {
-		log.Warnw("cannot-marshal-returned-val", log.Fields{"error": err})
+		logger.Warnw("cannot-marshal-returned-val", log.Fields{"error": err})
 		return nil, err
 	}
 	return marshalledReturnedVal, nil
@@ -534,7 +530,7 @@
 	var err error
 	// Error should never happen here
 	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
-		log.Warnw("cannot-marshal-failed-response-body", log.Fields{"error": err})
+		logger.Warnw("cannot-marshal-failed-response-body", log.Fields{"error": err})
 	}
 
 	return &ic.InterContainerMessage{
@@ -547,7 +543,7 @@
 //formatRequest formats a request to send over kafka and returns an InterContainerMessage message on success
 //or an error on failure
 func encodeResponse(request *ic.InterContainerMessage, success bool, returnedValues ...interface{}) (*ic.InterContainerMessage, error) {
-	//log.Debugw("encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
+	//logger.Debugw("encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
 	responseHeader := &ic.Header{
 		Id:        request.Header.Id,
 		Type:      ic.MessageType_RESPONSE,
@@ -562,7 +558,7 @@
 	var err error
 	for _, returnVal := range returnedValues {
 		if marshalledReturnedVal, err = encodeReturnedValue(returnVal); err != nil {
-			log.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
+			logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
 		}
 		break // for now we support only 1 returned value - (excluding the error)
 	}
@@ -575,7 +571,7 @@
 	// Marshal the response body
 	var marshalledResponseBody *any.Any
 	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
-		log.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
+		logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
 		return nil, err
 	}
 
@@ -611,7 +607,7 @@
 	var marshalledArg *any.Any
 	var err error
 	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: transactionId}); err != nil {
-		log.Warnw("cannot-add-transactionId", log.Fields{"error": err})
+		logger.Warnw("cannot-add-transactionId", log.Fields{"error": err})
 		return currentArgs
 	}
 	protoArg := &ic.Argument{
@@ -625,7 +621,7 @@
 	var marshalledArg *any.Any
 	var err error
 	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: fromTopic}); err != nil {
-		log.Warnw("cannot-add-transactionId", log.Fields{"error": err})
+		logger.Warnw("cannot-add-transactionId", log.Fields{"error": err})
 		return currentArgs
 	}
 	protoArg := &ic.Argument{
@@ -645,9 +641,9 @@
 		// Get the request body
 		requestBody := &ic.InterContainerRequestBody{}
 		if err = ptypes.UnmarshalAny(msg.Body, requestBody); err != nil {
-			log.Warnw("cannot-unmarshal-request", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-request", log.Fields{"error": err})
 		} else {
-			log.Debugw("received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
+			logger.Debugw("received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
 			// let the callee unpack the arguments as its the only one that knows the real proto type
 			// Augment the requestBody with the message Id as it will be used in scenarios where cores
 			// are set in pairs and competing
@@ -659,7 +655,7 @@
 
 			out, err = CallFuncByName(targetInterface, requestBody.Rpc, requestBody.Args)
 			if err != nil {
-				log.Warn(err)
+				logger.Warn(err)
 			}
 		}
 		// Response required?
@@ -679,7 +675,7 @@
 				if out[lastIndex].Interface() != nil { // Error
 					if retError, ok := out[lastIndex].Interface().(error); ok {
 						if retError.Error() == ErrorTransactionNotAcquired.Error() {
-							log.Debugw("Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
+							logger.Debugw("Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
 							return // Ignore - process is in competing mode and ignored transaction
 						}
 						returnError = &ic.Error{Reason: retError.Error()}
@@ -689,12 +685,12 @@
 						returnedValues = append(returnedValues, returnError)
 					}
 				} else if len(out) == 2 && reflect.ValueOf(out[0].Interface()).IsValid() && reflect.ValueOf(out[0].Interface()).IsNil() {
-					log.Warnw("Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
+					logger.Warnw("Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
 					return // Ignore - should not happen
 				} else { // Non-error case
 					success = true
 					for idx, val := range out {
-						//log.Debugw("returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
+						//logger.Debugw("returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
 						if idx != lastIndex {
 							returnedValues = append(returnedValues, val.Interface())
 						}
@@ -704,7 +700,7 @@
 
 			var icm *ic.InterContainerMessage
 			if icm, err = encodeResponse(msg, success, returnedValues...); err != nil {
-				log.Warnw("error-encoding-response-returning-failure-result", log.Fields{"error": err})
+				logger.Warnw("error-encoding-response-returning-failure-result", log.Fields{"error": err})
 				icm = encodeDefaultFailedResponse(msg)
 			}
 			// To preserve ordering of messages, all messages to a given topic are sent to the same partition
@@ -713,22 +709,22 @@
 			// partitions.
 			replyTopic := &Topic{Name: msg.Header.FromTopic}
 			key := msg.Header.KeyTopic
-			log.Debugw("sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
+			logger.Debugw("sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
 			// TODO: handle error response.
 			go kp.kafkaClient.Send(icm, replyTopic, key)
 		}
 	} else if msg.Header.Type == ic.MessageType_RESPONSE {
-		log.Debugw("response-received", log.Fields{"msg-header": msg.Header})
+		logger.Debugw("response-received", log.Fields{"msg-header": msg.Header})
 		go kp.dispatchResponse(msg)
 	} else {
-		log.Warnw("unsupported-message-received", log.Fields{"msg-header": msg.Header})
+		logger.Warnw("unsupported-message-received", log.Fields{"msg-header": msg.Header})
 	}
 }
 
 func (kp *InterContainerProxy) waitForMessages(ch <-chan *ic.InterContainerMessage, topic Topic, targetInterface interface{}) {
 	//	Wait for messages
 	for msg := range ch {
-		//log.Debugw("request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
+		//logger.Debugw("request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
 		go kp.handleMessage(msg, targetInterface)
 	}
 }
@@ -737,7 +733,7 @@
 	kp.lockTransactionIdToChannelMap.RLock()
 	defer kp.lockTransactionIdToChannelMap.RUnlock()
 	if _, exist := kp.transactionIdToChannelMap[msg.Header.Id]; !exist {
-		log.Debugw("no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
+		logger.Debugw("no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
 		return
 	}
 	kp.transactionIdToChannelMap[msg.Header.Id].ch <- msg
@@ -748,7 +744,7 @@
 // API. There is one response channel waiting for kafka messages before dispatching the message to the
 // corresponding waiting channel
 func (kp *InterContainerProxy) subscribeForResponse(topic Topic, trnsId string) (chan *ic.InterContainerMessage, error) {
-	log.Debugw("subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
+	logger.Debugw("subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
 
 	// Create a specific channel for this consumers.  We cannot use the channel from the kafkaclient as it will
 	// broadcast any message for this topic to all channels waiting on it.
@@ -759,7 +755,7 @@
 }
 
 func (kp *InterContainerProxy) unSubscribeForResponse(trnsId string) error {
-	log.Debugw("unsubscribe-for-response", log.Fields{"trnsId": trnsId})
+	logger.Debugw("unsubscribe-for-response", log.Fields{"trnsId": trnsId})
 	kp.deleteFromTransactionIdToChannelMap(trnsId)
 	return nil
 }
@@ -803,12 +799,12 @@
 		// ascertain the value interface type is a proto.Message
 		protoValue, ok := arg.Value.(proto.Message)
 		if !ok {
-			log.Warnw("argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
+			logger.Warnw("argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
 			err := errors.New("argument-value-not-proto-message")
 			return nil, err
 		}
 		if marshalledArg, err = ptypes.MarshalAny(protoValue); err != nil {
-			log.Warnw("cannot-marshal-request", log.Fields{"error": err})
+			logger.Warnw("cannot-marshal-request", log.Fields{"error": err})
 			return nil, err
 		}
 		protoArg := &ic.Argument{
@@ -821,7 +817,7 @@
 	var marshalledData *any.Any
 	var err error
 	if marshalledData, err = ptypes.MarshalAny(requestBody); err != nil {
-		log.Warnw("cannot-marshal-request", log.Fields{"error": err})
+		logger.Warnw("cannot-marshal-request", log.Fields{"error": err})
 		return nil, err
 	}
 	request := &ic.InterContainerMessage{
@@ -835,10 +831,10 @@
 	//	Extract the message body
 	responseBody := ic.InterContainerResponseBody{}
 	if err := ptypes.UnmarshalAny(response.Body, &responseBody); err != nil {
-		log.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
+		logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 		return nil, err
 	}
-	//log.Debugw("response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
+	//logger.Debugw("response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
 
 	return &responseBody, nil
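The proxy above publishes every request with a key (typically the device ID) so that all messages for a device hash to the same partition and keep their order. A minimal sarama-only sketch of such a keyed publish; the broker address, topic and key are illustrative.

package main

import (
	"github.com/Shopify/sarama"
)

// Keyed publish: messages that share a key are hashed to the same partition,
// which is what preserves per-device ordering in the proxy above.
func main() {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // required by the sync producer

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		panic(err)
	}
	defer producer.Close()

	deviceID := "0001941bd45e71d8" // illustrative key; the proxy uses the device ID
	msg := &sarama.ProducerMessage{
		Topic: "openolt_12345", // illustrative topic
		Key:   sarama.StringEncoder(deviceID),
		Value: sarama.ByteEncoder([]byte("serialized InterContainerMessage")),
	}
	if _, _, err := producer.SendMessage(msg); err != nil {
		panic(err)
	}
}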
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
similarity index 83%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/sarama_client.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
index c05df69..9d4ab52 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
@@ -19,22 +19,19 @@
 	"context"
 	"errors"
 	"fmt"
+	"strings"
+	"sync"
+	"time"
+
 	"github.com/Shopify/sarama"
 	scc "github.com/bsm/sarama-cluster"
 	"github.com/eapache/go-resiliency/breaker"
 	"github.com/golang/protobuf/proto"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
-	ic "github.com/opencord/voltha-protos/v2/go/inter_container"
-	"strings"
-	"sync"
-	"time"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
 )
 
-func init() {
-	log.AddPackage(log.JSON, log.DebugLevel, nil)
-}
-
 type returnErrorFunction func() error
 
 // consumerChannels represents one or more consumers listening on a kafka topic.  Once a message is received on that
@@ -241,7 +238,7 @@
 }
 
 func (sc *SaramaClient) Start() error {
-	log.Info("Starting-kafka-sarama-client")
+	logger.Info("Starting-kafka-sarama-client")
 
 	// Create the Done channel
 	sc.doneCh = make(chan int, 1)
@@ -257,20 +254,20 @@
 
 	// Create the Cluster Admin
 	if err = sc.createClusterAdmin(); err != nil {
-		log.Errorw("Cannot-create-cluster-admin", log.Fields{"error": err})
+		logger.Errorw("Cannot-create-cluster-admin", log.Fields{"error": err})
 		return err
 	}
 
 	// Create the Publisher
 	if err := sc.createPublisher(); err != nil {
-		log.Errorw("Cannot-create-kafka-publisher", log.Fields{"error": err})
+		logger.Errorw("Cannot-create-kafka-publisher", log.Fields{"error": err})
 		return err
 	}
 
 	if sc.consumerType == DefaultConsumerType {
 		// Create the master consumers
 		if err := sc.createConsumer(); err != nil {
-			log.Errorw("Cannot-create-kafka-consumers", log.Fields{"error": err})
+			logger.Errorw("Cannot-create-kafka-consumers", log.Fields{"error": err})
 			return err
 		}
 	}
@@ -278,7 +275,7 @@
 	// Create the topic to consumers/channel map
 	sc.topicToConsumerChannelMap = make(map[string]*consumerChannels)
 
-	log.Info("kafka-sarama-client-started")
+	logger.Info("kafka-sarama-client-started")
 
 	sc.started = true
 
@@ -286,7 +283,7 @@
 }
 
 func (sc *SaramaClient) Stop() {
-	log.Info("stopping-sarama-client")
+	logger.Info("stopping-sarama-client")
 
 	sc.started = false
 
@@ -295,33 +292,33 @@
 
 	if sc.producer != nil {
 		if err := sc.producer.Close(); err != nil {
-			log.Errorw("closing-producer-failed", log.Fields{"error": err})
+			logger.Errorw("closing-producer-failed", log.Fields{"error": err})
 		}
 	}
 
 	if sc.consumer != nil {
 		if err := sc.consumer.Close(); err != nil {
-			log.Errorw("closing-partition-consumer-failed", log.Fields{"error": err})
+			logger.Errorw("closing-partition-consumer-failed", log.Fields{"error": err})
 		}
 	}
 
 	for key, val := range sc.groupConsumers {
-		log.Debugw("closing-group-consumer", log.Fields{"topic": key})
+		logger.Debugw("closing-group-consumer", log.Fields{"topic": key})
 		if err := val.Close(); err != nil {
-			log.Errorw("closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
+			logger.Errorw("closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
 		}
 	}
 
 	if sc.cAdmin != nil {
 		if err := sc.cAdmin.Close(); err != nil {
-			log.Errorw("closing-cluster-admin-failed", log.Fields{"error": err})
+			logger.Errorw("closing-cluster-admin-failed", log.Fields{"error": err})
 		}
 	}
 
 	//TODO: Clear the consumers map
 	//sc.clearConsumerChannelMap()
 
-	log.Info("sarama-client-stopped")
+	logger.Info("sarama-client-stopped")
 }
 
 //createTopic is an internal function to create a topic on the Kafka Broker. No locking is required as
@@ -338,15 +335,15 @@
 	if err := sc.cAdmin.CreateTopic(topic.Name, topicDetail, false); err != nil {
 		if err == sarama.ErrTopicAlreadyExists {
 			//	Not an error
-			log.Debugw("topic-already-exist", log.Fields{"topic": topic.Name})
+			logger.Debugw("topic-already-exist", log.Fields{"topic": topic.Name})
 			return nil
 		}
-		log.Errorw("create-topic-failure", log.Fields{"error": err})
+		logger.Errorw("create-topic-failure", log.Fields{"error": err})
 		return err
 	}
 	// TODO: Wait until the topic has been created.  No API is available in the Sarama clusterAdmin to
 	// do so.
-	log.Debugw("topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
+	logger.Debugw("topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
 	return nil
 }
 
@@ -368,16 +365,16 @@
 	if err := sc.cAdmin.DeleteTopic(topic.Name); err != nil {
 		if err == sarama.ErrUnknownTopicOrPartition {
 			//	Not an error, as the topic does not exist
-			log.Debugw("topic-not-exist", log.Fields{"topic": topic.Name})
+			logger.Debugw("topic-not-exist", log.Fields{"topic": topic.Name})
 			return nil
 		}
-		log.Errorw("delete-topic-failed", log.Fields{"topic": topic, "error": err})
+		logger.Errorw("delete-topic-failed", log.Fields{"topic": topic, "error": err})
 		return err
 	}
 
 	// Clear the topic from the consumer channel.  This will also close any consumers listening on that topic.
 	if err := sc.clearTopicFromConsumerChannelMap(*topic); err != nil {
-		log.Errorw("failure-clearing-channels", log.Fields{"topic": topic, "error": err})
+		logger.Errorw("failure-clearing-channels", log.Fields{"topic": topic, "error": err})
 		return err
 	}
 	return nil
@@ -389,11 +386,11 @@
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
-	log.Debugw("subscribe", log.Fields{"topic": topic.Name})
+	logger.Debugw("subscribe", log.Fields{"topic": topic.Name})
 
 	// If a consumer already exists for that topic then reuse it
 	if consumerCh := sc.getConsumerChannel(topic); consumerCh != nil {
-		log.Debugw("topic-already-subscribed", log.Fields{"topic": topic.Name})
+		logger.Debugw("topic-already-subscribed", log.Fields{"topic": topic.Name})
 		// Create a channel specific to that consumer and add it to the consumer channel map
 		ch := make(chan *ic.InterContainerMessage)
 		sc.addChannelToConsumerChannelMap(topic, ch)
@@ -408,12 +405,12 @@
 	if sc.consumerType == PartitionConsumer {
 		if sc.autoCreateTopic {
 			if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
-				log.Errorw("create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
+				logger.Errorw("create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
 				return nil, err
 			}
 		}
 		if consumerListeningChannel, err = sc.setupPartitionConsumerChannel(topic, getOffset(kvArgs...)); err != nil {
-			log.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
+			logger.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
 			return nil, err
 		}
 	} else if sc.consumerType == GroupCustomer {
@@ -421,7 +418,7 @@
 		// does not consume from a precreated topic in some scenarios
 		//if sc.autoCreateTopic {
 		//	if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
-		//		log.Errorw("create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
+		//		logger.Errorw("create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
 		//		return nil, err
 		//	}
 		//}
@@ -435,12 +432,12 @@
 			groupId = sc.consumerGroupPrefix + topic.Name
 		}
 		if consumerListeningChannel, err = sc.setupGroupConsumerChannel(topic, groupId, getOffset(kvArgs...)); err != nil {
-			log.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+			logger.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
 			return nil, err
 		}
 
 	} else {
-		log.Warnw("unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
+		logger.Warnw("unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
 		return nil, errors.New("unknown-consumer-type")
 	}
 
@@ -452,13 +449,13 @@
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
-	log.Debugw("unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
+	logger.Debugw("unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
 	var err error
 	if err = sc.removeChannelFromConsumerChannelMap(*topic, ch); err != nil {
-		log.Errorw("failed-removing-channel", log.Fields{"error": err})
+		logger.Errorw("failed-removing-channel", log.Fields{"error": err})
 	}
 	if err = sc.deleteFromGroupConsumers(topic.Name); err != nil {
-		log.Errorw("failed-deleting-group-consumer", log.Fields{"error": err})
+		logger.Errorw("failed-deleting-group-consumer", log.Fields{"error": err})
 	}
 	return err
 }
@@ -470,11 +467,11 @@
 	// events to the channel is rate-limited by livenessChannelInterval.
 	if sc.liveness != nil {
 		if sc.alive != alive {
-			log.Info("update-liveness-channel-because-change")
+			logger.Info("update-liveness-channel-because-change")
 			sc.liveness <- alive
 			sc.lastLivenessTime = time.Now()
 		} else if time.Now().Sub(sc.lastLivenessTime) > sc.livenessChannelInterval {
-			log.Info("update-liveness-channel-because-interval")
+			logger.Info("update-liveness-channel-because-interval")
 			sc.liveness <- alive
 			sc.lastLivenessTime = time.Now()
 		}
@@ -482,7 +479,7 @@
 
 	// Only emit a log message when the state changes
 	if sc.alive != alive {
-		log.Info("set-client-alive", log.Fields{"alive": alive})
+		logger.Infow("set-client-alive", log.Fields{"alive": alive})
 		sc.alive = alive
 	}
 }
@@ -491,7 +488,7 @@
 func (sc *SaramaClient) setUnhealthy() {
 	sc.healthy = false
 	if sc.healthiness != nil {
-		log.Infow("set-client-unhealthy", log.Fields{"healthy": sc.healthy})
+		logger.Infow("set-client-unhealthy", log.Fields{"healthy": sc.healthy})
 		sc.healthiness <- sc.healthy
 	}
 }
@@ -511,35 +508,35 @@
 
 	switch err.Error() {
 	case context.DeadlineExceeded.Error():
-		log.Info("is-liveness-error-timeout")
+		logger.Info("is-liveness-error-timeout")
 		return true
 	case sarama.ErrOutOfBrokers.Error(): // "Kafka: client has run out of available brokers"
-		log.Info("is-liveness-error-no-brokers")
+		logger.Info("is-liveness-error-no-brokers")
 		return true
 	case sarama.ErrShuttingDown.Error(): // "Kafka: message received by producer in process of shutting down"
-		log.Info("is-liveness-error-shutting-down")
+		logger.Info("is-liveness-error-shutting-down")
 		return true
 	case sarama.ErrControllerNotAvailable.Error(): // "Kafka: controller is not available"
-		log.Info("is-liveness-error-not-available")
+		logger.Info("is-liveness-error-not-available")
 		return true
 	case breaker.ErrBreakerOpen.Error(): // "circuit breaker is open"
-		log.Info("is-liveness-error-circuit-breaker-open")
+		logger.Info("is-liveness-error-circuit-breaker-open")
 		return true
 	}
 
 	if strings.HasSuffix(err.Error(), "connection refused") { // "dial tcp 10.244.1.176:9092: connect: connection refused"
-		log.Info("is-liveness-error-connection-refused")
+		logger.Info("is-liveness-error-connection-refused")
 		return true
 	}
 
 	if strings.HasSuffix(err.Error(), "i/o timeout") { // "dial tcp 10.244.1.176:9092: i/o timeout"
-		log.Info("is-liveness-error-io-timeout")
+		logger.Info("is-liveness-error-io-timeout")
 		return true
 	}
 
 	// Other errors shouldn't trigger a loss of liveness
 
-	log.Infow("is-liveness-error-ignored", log.Fields{"err": err})
+	logger.Infow("is-liveness-error-ignored", log.Fields{"err": err})
 
 	return false
 }
@@ -552,7 +549,7 @@
 	var ok bool
 	// ascertain the value interface type is a proto.Message
 	if protoMsg, ok = msg.(proto.Message); !ok {
-		log.Warnw("message-not-proto-message", log.Fields{"msg": msg})
+		logger.Warnw("message-not-proto-message", log.Fields{"msg": msg})
 		return errors.New(fmt.Sprintf("not-a-proto-msg-%s", msg))
 	}
 
@@ -560,7 +557,7 @@
 	var err error
 	//	Create the Sarama producer message
 	if marshalled, err = proto.Marshal(protoMsg); err != nil {
-		log.Errorw("marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
+		logger.Errorw("marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
 		return err
 	}
 	key := ""
@@ -579,10 +576,10 @@
 	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
 	select {
 	case ok := <-sc.producer.Successes():
-		log.Debugw("message-sent", log.Fields{"status": ok.Topic})
+		logger.Debugw("message-sent", log.Fields{"status": ok.Topic})
 		sc.updateLiveness(true)
 	case notOk := <-sc.producer.Errors():
-		log.Debugw("error-sending", log.Fields{"status": notOk})
+		logger.Debugw("error-sending", log.Fields{"status": notOk})
 		if sc.isLivenessError(notOk) {
 			sc.updateLiveness(false)
 		}
@@ -597,10 +594,10 @@
 // by the service (i.e. rw_core / ro_core) to update readiness status
 // and/or take other actions.
 func (sc *SaramaClient) EnableLivenessChannel(enable bool) chan bool {
-	log.Infow("kafka-enable-liveness-channel", log.Fields{"enable": enable})
+	logger.Infow("kafka-enable-liveness-channel", log.Fields{"enable": enable})
 	if enable {
 		if sc.liveness == nil {
-			log.Info("kafka-create-liveness-channel")
+			logger.Info("kafka-create-liveness-channel")
 			// At least 1, so we can immediately post to it without blocking
 			// Setting a bigger number (10) allows the monitor to fall behind
 			// without blocking others. The monitor shouldn't really fall
@@ -621,10 +618,10 @@
 // if the kafka consumers die, or some other catastrophic problem occurs
 // that would require re-creating the client.
 func (sc *SaramaClient) EnableHealthinessChannel(enable bool) chan bool {
-	log.Infow("kafka-enable-healthiness-channel", log.Fields{"enable": enable})
+	logger.Infow("kafka-enable-healthiness-channel", log.Fields{"enable": enable})
 	if enable {
 		if sc.healthiness == nil {
-			log.Info("kafka-create-healthiness-channel")
+			logger.Info("kafka-create-healthiness-channel")
 			// At least 1, so we can immediately post to it without blocking
 			// Setting a bigger number (10) allows the monitor to fall behind
 			// without blocking others. The monitor shouldn't really fall
@@ -659,10 +656,10 @@
 	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
 	select {
 	case ok := <-sc.producer.Successes():
-		log.Debugw("liveness-message-sent", log.Fields{"status": ok.Topic})
+		logger.Debugw("liveness-message-sent", log.Fields{"status": ok.Topic})
 		sc.updateLiveness(true)
 	case notOk := <-sc.producer.Errors():
-		log.Debugw("liveness-error-sending", log.Fields{"status": notOk})
+		logger.Debugw("liveness-error-sending", log.Fields{"status": notOk})
 		if sc.isLivenessError(notOk) {
 			sc.updateLiveness(false)
 		}
@@ -700,7 +697,7 @@
 	var cAdmin sarama.ClusterAdmin
 	var err error
 	if cAdmin, err = sarama.NewClusterAdmin([]string{kafkaFullAddr}, config); err != nil {
-		log.Errorw("cluster-admin-failure", log.Fields{"error": err, "broker-address": kafkaFullAddr})
+		logger.Errorw("cluster-admin-failure", log.Fields{"error": err, "broker-address": kafkaFullAddr})
 		return err
 	}
 	sc.cAdmin = cAdmin
@@ -760,7 +757,7 @@
 		consumerCh.channels = append(consumerCh.channels, ch)
 		return
 	}
-	log.Warnw("consumers-channel-not-exist", log.Fields{"topic": topic.Name})
+	logger.Warnw("consumers-channel-not-exist", log.Fields{"topic": topic.Name})
 }
 
 //closeConsumers closes a list of sarama consumers.  The consumers can be either partition consumers or group consumers
@@ -770,7 +767,7 @@
 		//	Is it a partition consumer?
 		if partionConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
 			if errTemp := partionConsumer.Close(); errTemp != nil {
-				log.Debugw("partition!!!", log.Fields{"err": errTemp})
+				logger.Debugw("partition!!!", log.Fields{"err": errTemp})
 				if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
 					// This can occur on race condition
 					err = nil
@@ -800,7 +797,7 @@
 		consumerCh.channels = removeChannel(consumerCh.channels, ch)
 		// If there are no more channels then we can close the consumers themselves
 		if len(consumerCh.channels) == 0 {
-			log.Debugw("closing-consumers", log.Fields{"topic": topic})
+			logger.Debugw("closing-consumers", log.Fields{"topic": topic})
 			err := closeConsumers(consumerCh.consumers)
 			//err := consumerCh.consumers.Close()
 			delete(sc.topicToConsumerChannelMap, topic.Name)
@@ -808,7 +805,7 @@
 		}
 		return nil
 	}
-	log.Warnw("topic-does-not-exist", log.Fields{"topic": topic.Name})
+	logger.Warnw("topic-does-not-exist", log.Fields{"topic": topic.Name})
 	return errors.New("topic-does-not-exist")
 }
 
@@ -829,7 +826,7 @@
 		delete(sc.topicToConsumerChannelMap, topic.Name)
 		return err
 	}
-	log.Debugw("topic-does-not-exist", log.Fields{"topic": topic.Name})
+	logger.Debugw("topic-does-not-exist", log.Fields{"topic": topic.Name})
 	return nil
 }
 
@@ -868,12 +865,12 @@
 	brokers := []string{kafkaFullAddr}
 
 	if producer, err := sarama.NewAsyncProducer(brokers, config); err != nil {
-		log.Errorw("error-starting-publisher", log.Fields{"error": err})
+		logger.Errorw("error-starting-publisher", log.Fields{"error": err})
 		return err
 	} else {
 		sc.producer = producer
 	}
-	log.Info("Kafka-publisher-created")
+	logger.Info("Kafka-publisher-created")
 	return nil
 }
 
@@ -889,12 +886,12 @@
 	brokers := []string{kafkaFullAddr}
 
 	if consumer, err := sarama.NewConsumer(brokers, config); err != nil {
-		log.Errorw("error-starting-consumers", log.Fields{"error": err})
+		logger.Errorw("error-starting-consumers", log.Fields{"error": err})
 		return err
 	} else {
 		sc.consumer = consumer
 	}
-	log.Info("Kafka-consumers-created")
+	logger.Info("Kafka-consumers-created")
 	return nil
 }
 
@@ -918,10 +915,10 @@
 	var err error
 
 	if consumer, err = scc.NewConsumer(brokers, groupId, topics, config); err != nil {
-		log.Errorw("create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+		logger.Errorw("create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
 		return nil, err
 	}
-	log.Debugw("create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
+	logger.Debugw("create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
 
 	//sc.groupConsumers[topic.Name] = consumer
 	sc.addToGroupConsumers(topic.Name, consumer)
@@ -942,7 +939,7 @@
 }
 
 func (sc *SaramaClient) consumeFromAPartition(topic *Topic, consumer sarama.PartitionConsumer, consumerChnls *consumerChannels) {
-	log.Debugw("starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
+	logger.Debugw("starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
 startloop:
 	for {
 		select {
@@ -950,38 +947,38 @@
 			if ok {
 				if sc.isLivenessError(err) {
 					sc.updateLiveness(false)
-					log.Warnw("partition-consumers-error", log.Fields{"error": err})
+					logger.Warnw("partition-consumers-error", log.Fields{"error": err})
 				}
 			} else {
 				// Channel is closed
 				break startloop
 			}
 		case msg, ok := <-consumer.Messages():
-			//log.Debugw("message-received", log.Fields{"msg": msg, "receivedTopic": msg.Topic})
+			//logger.Debugw("message-received", log.Fields{"msg": msg, "receivedTopic": msg.Topic})
 			if !ok {
 				// channel is closed
 				break startloop
 			}
 			msgBody := msg.Value
 			sc.updateLiveness(true)
-			log.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			logger.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
 			icm := &ic.InterContainerMessage{}
 			if err := proto.Unmarshal(msgBody, icm); err != nil {
-				log.Warnw("partition-invalid-message", log.Fields{"error": err})
+				logger.Warnw("partition-invalid-message", log.Fields{"error": err})
 				continue
 			}
 			go sc.dispatchToConsumers(consumerChnls, icm)
 		case <-sc.doneCh:
-			log.Infow("partition-received-exit-signal", log.Fields{"topic": topic.Name})
+			logger.Infow("partition-received-exit-signal", log.Fields{"topic": topic.Name})
 			break startloop
 		}
 	}
-	log.Infow("partition-consumer-stopped", log.Fields{"topic": topic.Name})
+	logger.Infow("partition-consumer-stopped", log.Fields{"topic": topic.Name})
 	sc.setUnhealthy()
 }
 
 func (sc *SaramaClient) consumeGroupMessages(topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
-	log.Debugw("starting-group-consumption-loop", log.Fields{"topic": topic.Name})
+	logger.Debugw("starting-group-consumption-loop", log.Fields{"topic": topic.Name})
 
 startloop:
 	for {
@@ -991,44 +988,44 @@
 				if sc.isLivenessError(err) {
 					sc.updateLiveness(false)
 				}
-				log.Warnw("group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
+				logger.Warnw("group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
 			} else {
-				log.Warnw("group-consumers-closed-err", log.Fields{"topic": topic.Name})
+				logger.Warnw("group-consumers-closed-err", log.Fields{"topic": topic.Name})
 				// channel is closed
 				break startloop
 			}
 		case msg, ok := <-consumer.Messages():
 			if !ok {
-				log.Warnw("group-consumers-closed-msg", log.Fields{"topic": topic.Name})
+				logger.Warnw("group-consumers-closed-msg", log.Fields{"topic": topic.Name})
 				// Channel closed
 				break startloop
 			}
 			sc.updateLiveness(true)
-			log.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			logger.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
 			msgBody := msg.Value
 			icm := &ic.InterContainerMessage{}
 			if err := proto.Unmarshal(msgBody, icm); err != nil {
-				log.Warnw("invalid-message", log.Fields{"error": err})
+				logger.Warnw("invalid-message", log.Fields{"error": err})
 				continue
 			}
 			go sc.dispatchToConsumers(consumerChnls, icm)
 			consumer.MarkOffset(msg, "")
 		case ntf := <-consumer.Notifications():
-			log.Debugw("group-received-notification", log.Fields{"notification": ntf})
+			logger.Debugw("group-received-notification", log.Fields{"notification": ntf})
 		case <-sc.doneCh:
-			log.Infow("group-received-exit-signal", log.Fields{"topic": topic.Name})
+			logger.Infow("group-received-exit-signal", log.Fields{"topic": topic.Name})
 			break startloop
 		}
 	}
-	log.Infow("group-consumer-stopped", log.Fields{"topic": topic.Name})
+	logger.Infow("group-consumer-stopped", log.Fields{"topic": topic.Name})
 	sc.setUnhealthy()
 }
 
 func (sc *SaramaClient) startConsumers(topic *Topic) error {
-	log.Debugw("starting-consumers", log.Fields{"topic": topic.Name})
+	logger.Debugw("starting-consumers", log.Fields{"topic": topic.Name})
 	var consumerCh *consumerChannels
 	if consumerCh = sc.getConsumerChannel(topic); consumerCh == nil {
-		log.Errorw("consumers-not-exist", log.Fields{"topic": topic.Name})
+		logger.Errorw("consumers-not-exist", log.Fields{"topic": topic.Name})
 		return errors.New("consumers-not-exist")
 	}
 	// For each consumer listening for that topic, start a consumption loop
@@ -1038,7 +1035,7 @@
 		} else if gConsumer, ok := consumer.(*scc.Consumer); ok {
 			go sc.consumeGroupMessages(topic, gConsumer, consumerCh)
 		} else {
-			log.Errorw("invalid-consumer", log.Fields{"topic": topic})
+			logger.Errorw("invalid-consumer", log.Fields{"topic": topic})
 			return errors.New("invalid-consumer")
 		}
 	}
@@ -1052,7 +1049,7 @@
 	var err error
 
 	if pConsumers, err = sc.createPartitionConsumers(topic, initialOffset); err != nil {
-		log.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+		logger.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
 
@@ -1085,7 +1082,7 @@
 	var pConsumer *scc.Consumer
 	var err error
 	if pConsumer, err = sc.createGroupConsumer(topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
-		log.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+		logger.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
 	// Create the consumers/channel structure and set the consumers and create a channel on that topic - for now
@@ -1106,10 +1103,10 @@
 }
 
 func (sc *SaramaClient) createPartitionConsumers(topic *Topic, initialOffset int64) ([]sarama.PartitionConsumer, error) {
-	log.Debugw("creating-partition-consumers", log.Fields{"topic": topic.Name})
+	logger.Debugw("creating-partition-consumers", log.Fields{"topic": topic.Name})
 	partitionList, err := sc.consumer.Partitions(topic.Name)
 	if err != nil {
-		log.Warnw("get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+		logger.Warnw("get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
 
@@ -1117,7 +1114,7 @@
 	for _, partition := range partitionList {
 		var pConsumer sarama.PartitionConsumer
 		if pConsumer, err = sc.consumer.ConsumePartition(topic.Name, partition, initialOffset); err != nil {
-			log.Warnw("consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+			logger.Warnw("consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
 			return nil, err
 		}
 		pConsumers = append(pConsumers, pConsumer)
@@ -1132,7 +1129,7 @@
 		if channel == ch {
 			channels[len(channels)-1], channels[i] = channels[i], channels[len(channels)-1]
 			close(channel)
-			log.Debug("channel-closed")
+			logger.Debug("channel-closed")
 			return channels[:len(channels)-1]
 		}
 	}
@@ -1154,7 +1151,7 @@
 		consumer := sc.groupConsumers[topic]
 		delete(sc.groupConsumers, topic)
 		if err := consumer.Close(); err != nil {
-			log.Errorw("failure-closing-consumer", log.Fields{"error": err})
+			logger.Errorw("failure-closing-consumer", log.Fields{"error": err})
 			return err
 		}
 	}
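
The sarama client above exposes EnableLivenessChannel and EnableHealthinessChannel; events are emitted on state change and otherwise rate-limited by livenessChannelInterval. A minimal sketch (not part of this change) of how a consuming service might watch both channels; the interface, function, and callback names below are hypothetical, and only the two Enable* signatures come from the code above:

```go
package monitor

// kafkaLivenessReporter names only the two methods used below; the sarama
// client in pkg/kafka satisfies it.
type kafkaLivenessReporter interface {
	EnableLivenessChannel(enable bool) chan bool
	EnableHealthinessChannel(enable bool) chan bool
}

// monitorKafka forwards liveness/healthiness transitions to a caller-supplied
// report callback (for example, a readiness-probe update).
func monitorKafka(client kafkaLivenessReporter, report func(name string, ok bool)) {
	liveness := client.EnableLivenessChannel(true)
	healthiness := client.EnableHealthinessChannel(true)

	for {
		select {
		case alive := <-liveness:
			// Rate-limited by livenessChannelInterval, so this loop does not
			// spin on repeated identical states.
			report("kafka-liveness", alive)
		case healthy := <-healthiness:
			// Healthiness only drops (a consumer loop exited); recovery
			// generally means re-creating the client.
			report("kafka-healthiness", healthy)
		}
	}
}
```

Both channels are created with a small buffer, so a slow monitor briefly falls behind instead of blocking the send and consume paths.
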
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/utils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/utils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/kafka/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/utils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
similarity index 95%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/log/log.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
index fe3a4e0..43567e3 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/log/log.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
@@ -59,8 +59,6 @@
 	WarnLevel
 	// ErrorLevel logs a message at error level
 	ErrorLevel
-	// PanicLevel logs a message, then panics.
-	PanicLevel
 	// FatalLevel logs a message, then calls os.Exit(1).
 	FatalLevel
 )
@@ -109,6 +107,9 @@
 
 	// V reports whether verbosity level l is at least the requested verbose level.
 	V(l int) bool
+
+	// GetLogLevel returns the log level of this specific logger
+	GetLogLevel() int
 }
 
 // Fields is used as key-value pairs for structured logging
@@ -121,8 +122,9 @@
 var cfgs map[string]zp.Config
 
 type logger struct {
-	log    *zp.SugaredLogger
-	parent *zp.Logger
+	log         *zp.SugaredLogger
+	parent      *zp.Logger
+	packageName string
 }
 
 func intToAtomicLevel(l int) zp.AtomicLevel {
@@ -135,8 +137,6 @@
 		return zp.NewAtomicLevelAt(zc.WarnLevel)
 	case ErrorLevel:
 		return zp.NewAtomicLevelAt(zc.ErrorLevel)
-	case PanicLevel:
-		return zp.NewAtomicLevelAt(zc.PanicLevel)
 	case FatalLevel:
 		return zp.NewAtomicLevelAt(zc.FatalLevel)
 	}
@@ -153,8 +153,6 @@
 		return zc.WarnLevel
 	case ErrorLevel:
 		return zc.ErrorLevel
-	case PanicLevel:
-		return zc.PanicLevel
 	case FatalLevel:
 		return zc.FatalLevel
 	}
@@ -171,9 +169,23 @@
 		return WarnLevel
 	case zc.ErrorLevel:
 		return ErrorLevel
-	case zc.PanicLevel:
-		return PanicLevel
-	case FatalLevel:
+	case zc.FatalLevel:
+		return FatalLevel
+	}
+	return ErrorLevel
+}
+
+func StringToInt(l string) int {
+	switch l {
+	case "DEBUG":
+		return DebugLevel
+	case "INFO":
+		return InfoLevel
+	case "WARN":
+		return WarnLevel
+	case "ERROR":
+		return ErrorLevel
+	case "FATAL":
 		return FatalLevel
 	}
 	return ErrorLevel
@@ -258,8 +270,9 @@
 	}
 
 	loggers[pkgName] = &logger{
-		log:    l.Sugar(),
-		parent: l,
+		log:         l.Sugar(),
+		parent:      l,
+		packageName: pkgName,
 	}
 	return loggers[pkgName], nil
 }
@@ -279,8 +292,9 @@
 		}
 
 		loggers[pkgName] = &logger{
-			log:    l.Sugar(),
-			parent: l,
+			log:         l.Sugar(),
+			parent:      l,
+			packageName: pkgName,
 		}
 	}
 	return nil
@@ -326,8 +340,9 @@
 
 	// Set the logger
 	loggers[pkgName] = &logger{
-		log:    l.Sugar(),
-		parent: l,
+		log:         l.Sugar(),
+		parent:      l,
+		packageName: pkgName,
 	}
 	return loggers[pkgName], nil
 }
@@ -342,8 +357,6 @@
 		cfg.Level.SetLevel(zc.WarnLevel)
 	case ErrorLevel:
 		cfg.Level.SetLevel(zc.ErrorLevel)
-	case PanicLevel:
-		cfg.Level.SetLevel(zc.PanicLevel)
 	case FatalLevel:
 		cfg.Level.SetLevel(zc.FatalLevel)
 	default:
@@ -632,6 +645,11 @@
 	return l.parent.Core().Enabled(intToLevel(level))
 }
 
+// GetLogLevel returns the current level of the logger
+func (l logger) GetLogLevel() int {
+	return levelToInt(cfgs[l.packageName].Level.Level())
+}
+
 // With returns a logger initialized with the key-value pairs
 func With(keysAndValues Fields) Logger {
 	return logger{log: getPackageLevelSugaredLogger().With(serializeMap(keysAndValues)...), parent: defaultLogger.parent}
@@ -761,3 +779,8 @@
 func V(level int) bool {
 	return getPackageLevelLogger().V(level)
 }
+
+// GetLogLevel returns the log level of the invoking package
+func GetLogLevel() int {
+	return getPackageLevelLogger().GetLogLevel()
+}
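
Besides dropping PanicLevel, log.go gains two helpers: StringToInt, which maps a level name to the numeric constants, and GetLogLevel, available both on the Logger interface and as a package-level function for the invoking package. A minimal usage sketch (not part of this change), assuming the calling package registers itself via log.AddPackage as the removed init() in sarama_client.go used to; main and the "WARN" literal are only illustrative:

```go
package main

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

func init() {
	// Register this package with the shared logger, mirroring the init()
	// removed from sarama_client.go.
	log.AddPackage(log.JSON, log.DebugLevel, nil)
}

func main() {
	// StringToInt maps "DEBUG"/"INFO"/"WARN"/"ERROR"/"FATAL" to the numeric
	// constants; anything else falls back to ErrorLevel.
	want := log.StringToInt("WARN")

	// GetLogLevel reports the invoking package's current level, letting a
	// caller compare before touching verbosity.
	if current := log.GetLogLevel(); current != want {
		fmt.Printf("package logs at level %d, requested level is %d\n", current, want)
	}
}
```
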
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/pmmetrics/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/pmmetrics/performance_metrics.go
similarity index 97%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/pmmetrics/performance_metrics.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/pmmetrics/performance_metrics.go
index 8951a7d..b5d9369 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/pmmetrics/performance_metrics.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/pmmetrics/performance_metrics.go
@@ -17,7 +17,7 @@
 package pmmetrics
 
 import (
-	"github.com/opencord/voltha-protos/v2/go/voltha"
+	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
 // PmMetrics structure holds metric and device info
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/ponresourcemanager/ponresourcemanager.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/ponresourcemanager/ponresourcemanager.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go
index 0abe6c1..4587675 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/ponresourcemanager/ponresourcemanager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager/ponresourcemanager.go
@@ -23,11 +23,11 @@
 	"fmt"
 	"strconv"
 
-	"github.com/boljen/go-bitmap"
-	"github.com/opencord/voltha-lib-go/v2/pkg/db"
-	"github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
-	tp "github.com/opencord/voltha-lib-go/v2/pkg/techprofile"
+	bitmap "github.com/boljen/go-bitmap"
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	tp "github.com/opencord/voltha-lib-go/v3/pkg/techprofile"
 )
 
 const (
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/probe/probe.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
index 7e6dbf9..9f00953 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/probe/probe.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
@@ -18,7 +18,7 @@
 import (
 	"context"
 	"fmt"
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	"net/http"
 	"sync"
 )
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/4QueueHybridProfileMap1.json b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/4QueueHybridProfileMap1.json
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/4QueueHybridProfileMap1.json
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/4QueueHybridProfileMap1.json
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/README.md b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/README.md
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/README.md
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/README.md
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/config.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/config.go
similarity index 98%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/config.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/config.go
index b1a8ac5..2df7147 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/config.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/config.go
@@ -16,7 +16,7 @@
 package techprofile
 
 import (
-	"github.com/opencord/voltha-lib-go/v2/pkg/db"
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
 )
 
 // tech profile default constants
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go
similarity index 99%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go
index 3588838..0358291 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go
@@ -23,10 +23,11 @@
 	"regexp"
 	"strconv"
 
-	"github.com/opencord/voltha-lib-go/v2/pkg/db"
-	"github.com/opencord/voltha-lib-go/v2/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v2/pkg/log"
-	tp_pb "github.com/opencord/voltha-protos/v2/go/tech_profile"
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+
+	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
 )
 
 // Interface to pon resource manager APIs
diff --git a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go
similarity index 93%
rename from vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go
index cadca87..9184b5b 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v2/pkg/techprofile/tech_profile_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go
@@ -17,8 +17,8 @@
 package techprofile
 
 import (
-	"github.com/opencord/voltha-lib-go/v2/pkg/db"
-	tp_pb "github.com/opencord/voltha-protos/v2/go/tech_profile"
+	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
 )
 
 type TechProfileIf interface {