VOL-3677 - move to v4 of protos and lib

Change-Id: Ie9d215f69b798dbf2b879d8e7d041c0d671f84c0
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
deleted file mode 100644
index 20e1a52..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/core_proxy.go
+++ /dev/null
@@ -1,620 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package common
-
-import (
-	"context"
-	"sync"
-
-	"github.com/golang/protobuf/ptypes"
-	a "github.com/golang/protobuf/ptypes/any"
-	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
-	"github.com/opencord/voltha-protos/v3/go/voltha"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-type CoreProxy struct {
-	kafkaICProxy        kafka.InterContainerProxy
-	adapterTopic        string
-	coreTopic           string
-	deviceIdCoreMap     map[string]string
-	lockDeviceIdCoreMap sync.RWMutex
-}
-
-func NewCoreProxy(kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string) *CoreProxy {
-	var proxy CoreProxy
-	proxy.kafkaICProxy = kafkaProxy
-	proxy.adapterTopic = adapterTopic
-	proxy.coreTopic = coreTopic
-	proxy.deviceIdCoreMap = make(map[string]string)
-	proxy.lockDeviceIdCoreMap = sync.RWMutex{}
-	logger.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
-
-	return &proxy
-}
-
-func unPackResponse(rpc string, deviceId string, success bool, response *a.Any) error {
-	if success {
-		return nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(response, unpackResult); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw("response", log.Fields{"rpc": rpc, "deviceId": deviceId, "success": success, "error": err})
-		// TODO:  Need to get the real error code
-		return status.Errorf(codes.Canceled, "%s", unpackResult.Reason)
-	}
-}
-
-// UpdateCoreReference adds or updates a core reference (really the topic name) for a given device Id
-func (ap *CoreProxy) UpdateCoreReference(deviceId string, coreReference string) {
-	ap.lockDeviceIdCoreMap.Lock()
-	defer ap.lockDeviceIdCoreMap.Unlock()
-	ap.deviceIdCoreMap[deviceId] = coreReference
-}
-
-// DeleteCoreReference removes a core reference (really the topic name) for a given device Id
-func (ap *CoreProxy) DeleteCoreReference(deviceId string) {
-	ap.lockDeviceIdCoreMap.Lock()
-	defer ap.lockDeviceIdCoreMap.Unlock()
-	delete(ap.deviceIdCoreMap, deviceId)
-}
-
-func (ap *CoreProxy) getCoreTopic(deviceId string) kafka.Topic {
-	ap.lockDeviceIdCoreMap.Lock()
-	defer ap.lockDeviceIdCoreMap.Unlock()
-
-	if t, exist := ap.deviceIdCoreMap[deviceId]; exist {
-		return kafka.Topic{Name: t}
-	}
-
-	return kafka.Topic{Name: ap.coreTopic}
-}
-
-func (ap *CoreProxy) getAdapterTopic(args ...string) kafka.Topic {
-	return kafka.Topic{Name: ap.adapterTopic}
-}
-
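Illustrative sketch (not part of the original file) of the per-device topic resolution above: once UpdateCoreReference records a core topic for a device id, requests for that device are routed there; unknown devices fall back to the default core topic.

func topicExample(cp *CoreProxy) {
	cp.UpdateCoreReference("device-1", "rwcore_1")
	t := cp.getCoreTopic("device-1") // kafka.Topic{Name: "rwcore_1"}
	u := cp.getCoreTopic("device-2") // no mapping: kafka.Topic{Name: cp.coreTopic}
	_, _ = t, u
}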
-func (ap *CoreProxy) RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error {
-	logger.Debugw("registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
-	rpc := "Register"
-	topic := kafka.Topic{Name: ap.coreTopic}
-	replyToTopic := ap.getAdapterTopic()
-	args := make([]*kafka.KVArg, 2)
-
-	if adapter.TotalReplicas == 0 && adapter.CurrentReplica != 0 {
-		log.Fatal("totalReplicas can't be 0, since you're here you have at least one")
-	}
-
-	if adapter.CurrentReplica == 0 && adapter.TotalReplicas != 0 {
-		log.Fatal("currentReplica can't be 0, it has to start from 1")
-	}
-
-	if adapter.CurrentReplica == 0 && adapter.TotalReplicas == 0 {
-		// If the adapter does not set these fields they default to 0; in that
-		// case the adapter is not ready to be scaled, so default to a single
-		// instance.
-		adapter.CurrentReplica = 1
-		adapter.TotalReplicas = 1
-	}
-
-	if adapter.CurrentReplica > adapter.TotalReplicas {
-		log.Fatalf("CurrentReplica (%d) can't be greater than TotalReplicas (%d)",
-			adapter.CurrentReplica, adapter.TotalReplicas)
-	}
-
-	args[0] = &kafka.KVArg{
-		Key:   "adapter",
-		Value: adapter,
-	}
-	args[1] = &kafka.KVArg{
-		Key:   "deviceTypes",
-		Value: deviceTypes,
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, "", args...)
-	logger.Debugw("Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
-	return unPackResponse(rpc, "", success, result)
-}
-
-func (ap *CoreProxy) DeviceUpdate(ctx context.Context, device *voltha.Device) error {
-	logger.Debugw("DeviceUpdate", log.Fields{"deviceId": device.Id})
-	rpc := "DeviceUpdate"
-	toTopic := ap.getCoreTopic(device.Id)
-	args := make([]*kafka.KVArg, 1)
-	args[0] = &kafka.KVArg{
-		Key:   "device",
-		Value: device,
-	}
-	// Use a device specific topic as we are the only adaptercore handling requests for this device
-	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, device.Id, args...)
-	logger.Debugw("DeviceUpdate-response", log.Fields{"deviceId": device.Id, "success": success})
-	return unPackResponse(rpc, device.Id, success, result)
-}
-
-func (ap *CoreProxy) PortCreated(ctx context.Context, deviceId string, port *voltha.Port) error {
-	logger.Debugw("PortCreated", log.Fields{"portNo": port.PortNo})
-	rpc := "PortCreated"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	args := make([]*kafka.KVArg, 2)
-	id := &voltha.ID{Id: deviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-	args[1] = &kafka.KVArg{
-		Key:   "port",
-		Value: port,
-	}
-
-	// Use a device specific topic as we are the only adaptercore handling requests for this device
-	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("PortCreated-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
-}
-
-func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, operStatus voltha.OperStatus_Types) error {
-	logger.Debugw("PortsStateUpdate", log.Fields{"deviceId": deviceId})
-	rpc := "PortsStateUpdate"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	args := make([]*kafka.KVArg, 2)
-	id := &voltha.ID{Id: deviceId}
-	oStatus := &ic.IntType{Val: int64(operStatus)}
-
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-	args[1] = &kafka.KVArg{
-		Key:   "oper_status",
-		Value: oStatus,
-	}
-
-	// Use a device specific topic as we are the only adaptercore handling requests for this device
-	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("PortsStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
-}
-
-func (ap *CoreProxy) DeleteAllPorts(ctx context.Context, deviceId string) error {
-	logger.Debugw("DeleteAllPorts", log.Fields{"deviceId": deviceId})
-	rpc := "DeleteAllPorts"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	args := make([]*kafka.KVArg, 2)
-	id := &voltha.ID{Id: deviceId}
-
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-
-	// Use a device specific topic as we are the only adaptercore handling requests for this device
-	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
-}
-
-func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
-	connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error {
-	logger.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
-	rpc := "DeviceStateUpdate"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	args := make([]*kafka.KVArg, 3)
-	id := &voltha.ID{Id: deviceId}
-	oStatus := &ic.IntType{Val: int64(operStatus)}
-	cStatus := &ic.IntType{Val: int64(connStatus)}
-
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-	args[1] = &kafka.KVArg{
-		Key:   "oper_status",
-		Value: oStatus,
-	}
-	args[2] = &kafka.KVArg{
-		Key:   "connect_status",
-		Value: cStatus,
-	}
-	// Use a device specific topic as we are the only adaptercore handling requests for this device
-	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("DeviceStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
-}
-
-func (ap *CoreProxy) ChildDeviceDetected(ctx context.Context, parentDeviceId string, parentPortNo int,
-	childDeviceType string, channelId int, vendorId string, serialNumber string, onuId int64) (*voltha.Device, error) {
-	logger.Debugw("ChildDeviceDetected", log.Fields{"pDeviceId": parentDeviceId, "channelId": channelId})
-	rpc := "ChildDeviceDetected"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 7)
-	id := &voltha.ID{Id: parentDeviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "parent_device_id",
-		Value: id,
-	}
-	ppn := &ic.IntType{Val: int64(parentPortNo)}
-	args[1] = &kafka.KVArg{
-		Key:   "parent_port_no",
-		Value: ppn,
-	}
-	cdt := &ic.StrType{Val: childDeviceType}
-	args[2] = &kafka.KVArg{
-		Key:   "child_device_type",
-		Value: cdt,
-	}
-	channel := &ic.IntType{Val: int64(channelId)}
-	args[3] = &kafka.KVArg{
-		Key:   "channel_id",
-		Value: channel,
-	}
-	vId := &ic.StrType{Val: vendorId}
-	args[4] = &kafka.KVArg{
-		Key:   "vendor_id",
-		Value: vId,
-	}
-	sNo := &ic.StrType{Val: serialNumber}
-	args[5] = &kafka.KVArg{
-		Key:   "serial_number",
-		Value: sNo,
-	}
-	oId := &ic.IntType{Val: int64(onuId)}
-	args[6] = &kafka.KVArg{
-		Key:   "onu_id",
-		Value: oId,
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-
-	if success {
-		volthaDevice := &voltha.Device{}
-		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
-			return nil, status.Error(codes.InvalidArgument, err.Error())
-		}
-		return volthaDevice, nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw("ChildDeviceDetected-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
-
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(unpackResult.Code), unpackResult.Reason)
-	}
-
-}
-
-func (ap *CoreProxy) ChildDevicesLost(ctx context.Context, parentDeviceId string) error {
-	logger.Debugw("ChildDevicesLost", log.Fields{"pDeviceId": parentDeviceId})
-	rpc := "ChildDevicesLost"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 1)
-	id := &voltha.ID{Id: parentDeviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "parent_device_id",
-		Value: id,
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-	return unPackResponse(rpc, parentDeviceId, success, result)
-}
-
-func (ap *CoreProxy) ChildDevicesDetected(ctx context.Context, parentDeviceId string) error {
-	logger.Debugw("ChildDevicesDetected", log.Fields{"pDeviceId": parentDeviceId})
-	rpc := "ChildDevicesDetected"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 1)
-	id := &voltha.ID{Id: parentDeviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "parent_device_id",
-		Value: id,
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-	return unPackResponse(rpc, parentDeviceId, success, result)
-}
-
-func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
-	logger.Debugw("GetDevice", log.Fields{"deviceId": deviceId})
-	rpc := "GetDevice"
-
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 1)
-	id := &voltha.ID{Id: deviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-
-	if success {
-		volthaDevice := &voltha.Device{}
-		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
-			return nil, status.Error(codes.InvalidArgument, err.Error())
-		}
-		return volthaDevice, nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw("GetDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(unpackResult.Code), unpackResult.Reason)
-	}
-}
-
-func (ap *CoreProxy) GetChildDevice(ctx context.Context, parentDeviceId string, kwargs map[string]interface{}) (*voltha.Device, error) {
-	logger.Debugw("GetChildDevice", log.Fields{"parentDeviceId": parentDeviceId, "kwargs": kwargs})
-	rpc := "GetChildDevice"
-
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 4)
-	id := &voltha.ID{Id: parentDeviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-
-	var cnt uint8 = 0
-	for k, v := range kwargs {
-		cnt += 1
-		if k == "serial_number" {
-			val := &ic.StrType{Val: v.(string)}
-			args[cnt] = &kafka.KVArg{
-				Key:   k,
-				Value: val,
-			}
-		} else if k == "onu_id" {
-			val := &ic.IntType{Val: int64(v.(uint32))}
-			args[cnt] = &kafka.KVArg{
-				Key:   k,
-				Value: val,
-			}
-		} else if k == "parent_port_no" {
-			val := &ic.IntType{Val: int64(v.(uint32))}
-			args[cnt] = &kafka.KVArg{
-				Key:   k,
-				Value: val,
-			}
-		}
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-
-	if success {
-		volthaDevice := &voltha.Device{}
-		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
-			return nil, status.Error(codes.InvalidArgument, err.Error())
-		}
-		return volthaDevice, nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw("GetChildDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
-
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(unpackResult.Code), unpackResult.Reason)
-	}
-}
-
-func (ap *CoreProxy) GetChildDevices(ctx context.Context, parentDeviceId string) (*voltha.Devices, error) {
-	logger.Debugw("GetChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
-	rpc := "GetChildDevices"
-
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 1)
-	id := &voltha.ID{Id: parentDeviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("GetChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-
-	if success {
-		volthaDevices := &voltha.Devices{}
-		if err := ptypes.UnmarshalAny(result, volthaDevices); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
-			return nil, status.Error(codes.InvalidArgument, err.Error())
-		}
-		return volthaDevices, nil
-	} else {
-		unpackResult := &ic.Error{}
-		var err error
-		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
-		}
-		logger.Debugw("GetChildDevices-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
-
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(unpackResult.Code), unpackResult.Reason)
-	}
-}
-
-func (ap *CoreProxy) SendPacketIn(ctx context.Context, deviceId string, port uint32, pktPayload []byte) error {
-	logger.Debugw("SendPacketIn", log.Fields{"deviceId": deviceId, "port": port, "pktPayload": pktPayload})
-	rpc := "PacketIn"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 3)
-	id := &voltha.ID{Id: deviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-	portNo := &ic.IntType{Val: int64(port)}
-	args[1] = &kafka.KVArg{
-		Key:   "port",
-		Value: portNo,
-	}
-	pkt := &ic.Packet{Payload: pktPayload}
-	args[2] = &kafka.KVArg{
-		Key:   "packet",
-		Value: pkt,
-	}
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("SendPacketIn-response", log.Fields{"pDeviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
-}
-
-func (ap *CoreProxy) DeviceReasonUpdate(ctx context.Context, deviceId string, deviceReason string) error {
-	logger.Debugw("DeviceReasonUpdate", log.Fields{"deviceId": deviceId, "deviceReason": deviceReason})
-	rpc := "DeviceReasonUpdate"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 2)
-	id := &voltha.ID{Id: deviceId}
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: id,
-	}
-	reason := &ic.StrType{Val: deviceReason}
-	args[1] = &kafka.KVArg{
-		Key:   "device_reason",
-		Value: reason,
-	}
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("DeviceReason-response", log.Fields{"pDeviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
-}
-
-func (ap *CoreProxy) DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error {
-	logger.Debugw("DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
-	rpc := "DevicePMConfigUpdate"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(pmConfigs.Id)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := make([]*kafka.KVArg, 1)
-	args[0] = &kafka.KVArg{
-		Key:   "device_pm_config",
-		Value: pmConfigs,
-	}
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
-	logger.Debugw("DevicePMConfigUpdate-response", log.Fields{"pDeviceId": pmConfigs.Id, "success": success})
-	return unPackResponse(rpc, pmConfigs.Id, success, result)
-}
-
-func (ap *CoreProxy) ReconcileChildDevices(ctx context.Context, parentDeviceId string) error {
-	logger.Debugw("ReconcileChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
-	rpc := "ReconcileChildDevices"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(parentDeviceId)
-	replyToTopic := ap.getAdapterTopic()
-
-	args := []*kafka.KVArg{
-		{Key: "parent_device_id", Value: &voltha.ID{Id: parentDeviceId}},
-	}
-
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw("ReconcileChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-	return unPackResponse(rpc, parentDeviceId, success, result)
-}
-
-func (ap *CoreProxy) PortStateUpdate(ctx context.Context, deviceId string, pType voltha.Port_PortType, portNum uint32,
-	operStatus voltha.OperStatus_Types) error {
-	logger.Debugw("PortStateUpdate", log.Fields{"deviceId": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
-	rpc := "PortStateUpdate"
-	// Use a device specific topic to send the request.  The adapter handling the device creates a device
-	// specific topic
-	toTopic := ap.getCoreTopic(deviceId)
-	args := make([]*kafka.KVArg, 4)
-	deviceID := &voltha.ID{Id: deviceId}
-	portNo := &ic.IntType{Val: int64(portNum)}
-	portType := &ic.IntType{Val: int64(pType)}
-	oStatus := &ic.IntType{Val: int64(operStatus)}
-
-	args[0] = &kafka.KVArg{
-		Key:   "device_id",
-		Value: deviceID,
-	}
-	args[1] = &kafka.KVArg{
-		Key:   "oper_status",
-		Value: oStatus,
-	}
-	args[2] = &kafka.KVArg{
-		Key:   "port_type",
-		Value: portType,
-	}
-	args[3] = &kafka.KVArg{
-		Key:   "port_no",
-		Value: portNo,
-	}
-
-	// Use a device specific topic as we are the only adaptercore handling requests for this device
-	replyToTopic := ap.getAdapterTopic()
-	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw("PortStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(rpc, deviceId, success, result)
-}
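Illustrative sketch (not any real adapter's code) of how an adapter drove the deleted CoreProxy, written against the v3 API shown above; under this commit the import paths move from .../v3/... to .../v4/... (other API details may differ in v4). The topic names and ids are placeholders.

package main

import (
	"context"

	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/common"
	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
	"github.com/opencord/voltha-protos/v3/go/voltha"
)

func run(icProxy kafka.InterContainerProxy) error {
	// Bind the proxy to this adapter's topic and the core's default topic.
	cp := common.NewCoreProxy(icProxy, "myadapter", "rwcore")

	// Register the adapter and its device types with the core.
	adapter := &voltha.Adapter{Id: "myadapter", CurrentReplica: 1, TotalReplicas: 1}
	if err := cp.RegisterAdapter(context.Background(), adapter, &voltha.DeviceTypes{}); err != nil {
		return err
	}

	// Report a device state change; the request goes to the core topic
	// recorded for that device id, or to the default core topic.
	return cp.DeviceStateUpdate(context.Background(), "device-1",
		voltha.ConnectStatus_REACHABLE, voltha.OperStatus_ACTIVE)
}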
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
deleted file mode 100644
index 1e81890..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/iAdapter.go
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package adapters
-
-import (
-	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
-	"github.com/opencord/voltha-protos/v3/go/openflow_13"
-	"github.com/opencord/voltha-protos/v3/go/voltha"
-)
-
-//IAdapter represents the set of APIs a voltha adapter has to support.
-type IAdapter interface {
-	Adapter_descriptor() error
-	Device_types() (*voltha.DeviceTypes, error)
-	Health() (*voltha.HealthStatus, error)
-	Adopt_device(device *voltha.Device) error
-	Reconcile_device(device *voltha.Device) error
-	Abandon_device(device *voltha.Device) error
-	Disable_device(device *voltha.Device) error
-	Reenable_device(device *voltha.Device) error
-	Reboot_device(device *voltha.Device) error
-	Self_test_device(device *voltha.Device) error
-	Delete_device(device *voltha.Device) error
-	Get_device_details(device *voltha.Device) error
-	Update_flows_bulk(device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error
-	Update_flows_incrementally(device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error
-	Update_pm_config(device *voltha.Device, pm_configs *voltha.PmConfigs) error
-	Receive_packet_out(deviceId string, egress_port_no int, msg *openflow_13.OfpPacketOut) error
-	Suppress_event(filter *voltha.EventFilter) error
-	Unsuppress_event(filter *voltha.EventFilter) error
-	Get_ofp_device_info(device *voltha.Device) (*ic.SwitchCapability, error)
-	Process_inter_adapter_message(msg *ic.InterAdapterMessage) error
-	Download_image(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Get_image_download_status(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Cancel_image_download(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Activate_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Revert_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Enable_port(deviceId string, port *voltha.Port) error
-	Disable_port(deviceId string, port *voltha.Port) error
-	Child_device_lost(parentDeviceId string, parentPortNo uint32, onuID uint32) error
-	Start_omci_test(device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error)
-	Get_ext_value(deviceId string, device *voltha.Device, valueflag voltha.ValueType_Type) (*voltha.ReturnValues, error)
-}
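Illustrative sketch (not any real adapter) of the shape an IAdapter implementation takes; only two methods are shown, the remaining ones follow the same pattern.

package main

import (
	"errors"

	"github.com/opencord/voltha-protos/v3/go/voltha"
)

type skeletonAdapter struct{}

// Adopt_device is invoked by name, through the reflection dispatch in the
// kafka inter-container library, when the core assigns a device to this adapter.
func (sa *skeletonAdapter) Adopt_device(device *voltha.Device) error {
	if device == nil {
		return errors.New("nil-device")
	}
	// ... provision the device, then report state back through the CoreProxy ...
	return nil
}

// Operations the adapter does not support simply return an error.
func (sa *skeletonAdapter) Abandon_device(device *voltha.Device) error {
	return errors.New("unimplemented")
}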
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
deleted file mode 100644
index cbde834..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/kafka_inter_container_library.go
+++ /dev/null
@@ -1,937 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package kafka
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"reflect"
-	"strings"
-	"sync"
-	"time"
-
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
-	"github.com/golang/protobuf/ptypes/any"
-	"github.com/google/uuid"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
-)
-
-const (
-	DefaultMaxRetries     = 3
-	DefaultRequestTimeout = 60000 // 60000 milliseconds - to handle a wider latency range
-)
-
-const (
-	TransactionKey = "transactionID"
-	FromTopic      = "fromTopic"
-)
-
-var ErrorTransactionNotAcquired = errors.New("transaction-not-acquired")
-var ErrorTransactionInvalidId = errors.New("transaction-invalid-id")
-
-// requestHandlerChannel represents an interface associated with a channel.  Whenever an event is
-// obtained from that channel, this interface is invoked.  This is used to handle
-// async requests into the Core via the kafka messaging bus.
-type requestHandlerChannel struct {
-	requesthandlerInterface interface{}
-	ch                      <-chan *ic.InterContainerMessage
-}
-
-// transactionChannel represents a combination of a topic and a channel onto which a response
-// received on the kafka bus will be sent
-type transactionChannel struct {
-	topic *Topic
-	ch    chan *ic.InterContainerMessage
-}
-
-type InterContainerProxy interface {
-	Start() error
-	Stop()
-	GetDefaultTopic() *Topic
-	InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any)
-	InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse
-	SubscribeWithRequestHandlerInterface(topic Topic, handler interface{}) error
-	SubscribeWithDefaultRequestHandler(topic Topic, initialOffset int64) error
-	UnSubscribeFromRequestHandler(topic Topic) error
-	DeleteTopic(topic Topic) error
-	EnableLivenessChannel(enable bool) chan bool
-	SendLiveness() error
-}
-
-// interContainerProxy represents the messaging proxy
-type interContainerProxy struct {
-	kafkaAddress                   string
-	defaultTopic                   *Topic
-	defaultRequestHandlerInterface interface{}
-	kafkaClient                    Client
-	doneCh                         chan struct{}
-	doneOnce                       sync.Once
-
-	// This map is used to map a topic to an interface and channel.   When a request is received
-	// on that channel (registered to the topic) then that interface is invoked.
-	topicToRequestHandlerChannelMap   map[string]*requestHandlerChannel
-	lockTopicRequestHandlerChannelMap sync.RWMutex
-
-	// This map is used to map a response topic to a channel.  That channel receives all responses
-	// for the topic and forwards them to the appropriate consumer's channel, using the
-	// transactionIdToChannelMap.
-	topicToResponseChannelMap   map[string]<-chan *ic.InterContainerMessage
-	lockTopicResponseChannelMap sync.RWMutex
-
-	// This map is used to map a transaction to a consumers channel.  This is used whenever a request has been
-	// sent out and we are waiting for a response.
-	transactionIdToChannelMap     map[string]*transactionChannel
-	lockTransactionIdToChannelMap sync.RWMutex
-}
-
-type InterContainerProxyOption func(*interContainerProxy)
-
-func InterContainerAddress(address string) InterContainerProxyOption {
-	return func(args *interContainerProxy) {
-		args.kafkaAddress = address
-	}
-}
-
-func DefaultTopic(topic *Topic) InterContainerProxyOption {
-	return func(args *interContainerProxy) {
-		args.defaultTopic = topic
-	}
-}
-
-func RequestHandlerInterface(handler interface{}) InterContainerProxyOption {
-	return func(args *interContainerProxy) {
-		args.defaultRequestHandlerInterface = handler
-	}
-}
-
-func MsgClient(client Client) InterContainerProxyOption {
-	return func(args *interContainerProxy) {
-		args.kafkaClient = client
-	}
-}
-
-func newInterContainerProxy(opts ...InterContainerProxyOption) *interContainerProxy {
-	proxy := &interContainerProxy{
-		kafkaAddress: DefaultKafkaAddress,
-		doneCh:       make(chan struct{}),
-	}
-
-	for _, option := range opts {
-		option(proxy)
-	}
-
-	return proxy
-}
-
-func NewInterContainerProxy(opts ...InterContainerProxyOption) InterContainerProxy {
-	return newInterContainerProxy(opts...)
-}
-
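Illustrative sketch (not in the original file) of wiring the proxy together with the functional options above; kc is any Client implementation from this package, and the address and topic name are placeholders.

func buildProxy(kc Client, handler interface{}) (InterContainerProxy, error) {
	kp := NewInterContainerProxy(
		InterContainerAddress("kafka:9092"),     // broker address
		DefaultTopic(&Topic{Name: "myadapter"}), // topic this component listens on
		RequestHandlerInterface(handler),        // object whose exported methods serve RPCs
		MsgClient(kc),                           // concrete kafka client
	)
	// Start must be called before the proxy is used; it fatals if no client was set.
	if err := kp.Start(); err != nil {
		return nil, err
	}
	return kp, nil
}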
-func (kp *interContainerProxy) Start() error {
-	logger.Info("Starting-Proxy")
-
-	// Kafka MsgClient should already have been created.  If not, output fatal error
-	if kp.kafkaClient == nil {
-		logger.Fatal("kafka-client-not-set")
-	}
-
-	// Start the kafka client
-	if err := kp.kafkaClient.Start(); err != nil {
-		logger.Errorw("Cannot-create-kafka-proxy", log.Fields{"error": err})
-		return err
-	}
-
-	// Create the topic to response channel map
-	kp.topicToResponseChannelMap = make(map[string]<-chan *ic.InterContainerMessage)
-
-	// Create the transactionId to channel map
-	kp.transactionIdToChannelMap = make(map[string]*transactionChannel)
-
-	// Create the topic to request channel map
-	kp.topicToRequestHandlerChannelMap = make(map[string]*requestHandlerChannel)
-
-	return nil
-}
-
-func (kp *interContainerProxy) Stop() {
-	logger.Info("stopping-intercontainer-proxy")
-	kp.doneOnce.Do(func() { close(kp.doneCh) })
-	// TODO : Perform cleanup
-	kp.kafkaClient.Stop()
-	err := kp.deleteAllTopicRequestHandlerChannelMap()
-	if err != nil {
-		logger.Errorw("failed-delete-all-topic-request-handler-channel-map", log.Fields{"error": err})
-	}
-	err = kp.deleteAllTopicResponseChannelMap()
-	if err != nil {
-		logger.Errorw("failed-delete-all-topic-response-channel-map", log.Fields{"error": err})
-	}
-	kp.deleteAllTransactionIdToChannelMap()
-}
-
-func (kp *interContainerProxy) GetDefaultTopic() *Topic {
-	return kp.defaultTopic
-}
-
-// InvokeAsyncRPC is used to make an RPC request asynchronously
-func (kp *interContainerProxy) InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
-	waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse {
-
-	logger.Debugw("InvokeAsyncRPC", log.Fields{"rpc": rpc, "key": key})
-	// If a replyToTopic is provided then we use it, otherwise we just use the default topic.  The replyToTopic is
-	// typically the device ID.
-	responseTopic := replyToTopic
-	if responseTopic == nil {
-		responseTopic = kp.GetDefaultTopic()
-	}
-
-	chnl := make(chan *RpcResponse)
-
-	go func() {
-
-		// once we're done,
-		// close the response channel
-		defer close(chnl)
-
-		var err error
-		var protoRequest *ic.InterContainerMessage
-
-		// Encode the request
-		protoRequest, err = encodeRequest(rpc, toTopic, responseTopic, key, kvArgs...)
-		if err != nil {
-			logger.Warnw("cannot-format-request", log.Fields{"rpc": rpc, "error": err})
-			chnl <- NewResponse(RpcFormattingError, err, nil)
-			return
-		}
-
-		// Subscribe for response, if needed, before sending request
-		var ch <-chan *ic.InterContainerMessage
-		if ch, err = kp.subscribeForResponse(*responseTopic, protoRequest.Header.Id); err != nil {
-			logger.Errorw("failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
-			chnl <- NewResponse(RpcTransportError, err, nil)
-			return
-		}
-
-		// Send request - if the topic is formatted with a device Id then we will send the request using a
-		// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
-		// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
-		logger.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
-
-		// if the message cannot be sent on kafka, publish an error response and close the channel
-		if err = kp.kafkaClient.Send(protoRequest, toTopic, key); err != nil {
-			chnl <- NewResponse(RpcTransportError, err, nil)
-			return
-		}
-
-		// if the client is not waiting for a response, send the ack and return; the deferred close releases the channel
-		chnl <- NewResponse(RpcSent, nil, nil)
-		if !waitForResponse {
-			return
-		}
-
-		defer func() {
-			// Remove the subscription for a response on return
-			if err := kp.unSubscribeForResponse(protoRequest.Header.Id); err != nil {
-				logger.Warnw("invoke-async-rpc-unsubscriber-for-response-failed", log.Fields{"err": err})
-			}
-		}()
-
-		// Wait for response as well as timeout or cancellation
-		select {
-		case msg, ok := <-ch:
-			if !ok {
-				logger.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
-				logger.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": responseTopic.Name})
-				chnl <- NewResponse(RpcTransportError, status.Error(codes.Aborted, "channel closed"), nil)
-				// msg is nil once the channel is closed, so return rather than fall through and dereference it
-				return
-			}
-			if responseBody, err := decodeResponse(msg); err != nil {
-				chnl <- NewResponse(RpcReply, err, nil)
-			} else {
-				if responseBody.Success {
-					chnl <- NewResponse(RpcReply, nil, responseBody.Result)
-				} else {
-					// response body contains an error
-					unpackErr := &ic.Error{}
-					if err := ptypes.UnmarshalAny(responseBody.Result, unpackErr); err != nil {
-						chnl <- NewResponse(RpcReply, err, nil)
-					} else {
-						chnl <- NewResponse(RpcReply, status.Error(codes.Internal, unpackErr.Reason), nil)
-					}
-				}
-			}
-		case <-ctx.Done():
-			logger.Errorw("context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
-			err := status.Error(codes.DeadlineExceeded, ctx.Err().Error())
-			chnl <- NewResponse(RpcTimeout, err, nil)
-		case <-kp.doneCh:
-			chnl <- NewResponse(RpcSystemClosing, nil, nil)
-			logger.Warnw("received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
-		}
-	}()
-	return chnl
-}
-
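Illustrative sketch (not in the original file) of consuming the channel returned by InvokeAsyncRPC above: the first message acknowledges the send (RpcSent) and, when waitForResponse is true, a later message carries the reply or an error. It assumes the RpcResponse fields MType, Err and Reply from this package; the RPC name and key are placeholders.

func invokeExample(ctx context.Context, kp InterContainerProxy, toTopic *Topic) (*any.Any, error) {
	ch := kp.InvokeAsyncRPC(ctx, "GetDevice", toTopic, nil, true, "device-1")
	for resp := range ch { // the proxy closes the channel when it is done
		switch resp.MType {
		case RpcSent:
			continue // request is on the wire; keep waiting for the reply
		case RpcReply:
			// Reply is an *any.Any for the caller to unmarshal
			return resp.Reply, resp.Err
		default: // RpcFormattingError, RpcTransportError, RpcTimeout, RpcSystemClosing
			return nil, resp.Err
		}
	}
	return nil, nil
}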
-// InvokeRPC is used to send a request to a given topic
-func (kp *interContainerProxy) InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
-	waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any) {
-
-	// If a replyToTopic is provided then we use it, otherwise we just use the default topic.  The replyToTopic is
-	// typically the device ID.
-	responseTopic := replyToTopic
-	if responseTopic == nil {
-		responseTopic = kp.defaultTopic
-	}
-
-	// Encode the request
-	protoRequest, err := encodeRequest(rpc, toTopic, responseTopic, key, kvArgs...)
-	if err != nil {
-		logger.Warnw("cannot-format-request", log.Fields{"rpc": rpc, "error": err})
-		return false, nil
-	}
-
-	// Subscribe for response, if needed, before sending request
-	var ch <-chan *ic.InterContainerMessage
-	if waitForResponse {
-		var err error
-		if ch, err = kp.subscribeForResponse(*responseTopic, protoRequest.Header.Id); err != nil {
-			logger.Errorw("failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
-		}
-	}
-
-	// Send request - if the topic is formatted with a device Id then we will send the request using a
-	// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
-	// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
-	//key := GetDeviceIdFromTopic(*toTopic)
-	logger.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
-	go func() {
-		if err := kp.kafkaClient.Send(protoRequest, toTopic, key); err != nil {
-			logger.Errorw("send-failed", log.Fields{
-				"topic": toTopic,
-				"key":   key,
-				"error": err})
-		}
-	}()
-
-	if waitForResponse {
-		// Create a child context based on the parent context, if any
-		var cancel context.CancelFunc
-		childCtx := context.Background()
-		if ctx == nil {
-			ctx, cancel = context.WithTimeout(context.Background(), DefaultRequestTimeout*time.Millisecond)
-		} else {
-			childCtx, cancel = context.WithTimeout(ctx, DefaultRequestTimeout*time.Millisecond)
-		}
-		defer cancel()
-
-		// Wait for response as well as timeout or cancellation
-		// Remove the subscription for a response on return
-		defer func() {
-			if err := kp.unSubscribeForResponse(protoRequest.Header.Id); err != nil {
-				logger.Errorw("response-unsubscribe-failed", log.Fields{
-					"id":    protoRequest.Header.Id,
-					"error": err})
-			}
-		}()
-		select {
-		case msg, ok := <-ch:
-			if !ok {
-				logger.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": responseTopic.Name})
-				protoError := &ic.Error{Reason: "channel-closed"}
-				var marshalledArg *any.Any
-				if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
-					return false, nil // Should never happen
-				}
-				return false, marshalledArg
-			}
-			logger.Debugw("received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
-			var responseBody *ic.InterContainerResponseBody
-			var err error
-			if responseBody, err = decodeResponse(msg); err != nil {
-				logger.Errorw("decode-response-error", log.Fields{"error": err})
-				// responseBody is nil on a decode error, so report failure instead of dereferencing it
-				return false, nil
-			}
-			return responseBody.Success, responseBody.Result
-		case <-ctx.Done():
-			logger.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
-			//	 pack the error as proto any type
-			protoError := &ic.Error{Reason: ctx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
-
-			var marshalledArg *any.Any
-			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
-				return false, nil // Should never happen
-			}
-			return false, marshalledArg
-		case <-childCtx.Done():
-			logger.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
-			//	 pack the error as proto any type
-			protoError := &ic.Error{Reason: childCtx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
-
-			var marshalledArg *any.Any
-			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
-				return false, nil // Should never happen
-			}
-			return false, marshalledArg
-		case <-kp.doneCh:
-			logger.Infow("received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
-			return true, nil
-		}
-	}
-	return true, nil
-}
-
-// SubscribeWithRequestHandlerInterface allows a caller to assign a target object to be invoked automatically
-// when a message is received on a given topic
-func (kp *interContainerProxy) SubscribeWithRequestHandlerInterface(topic Topic, handler interface{}) error {
-
-	// Subscribe to receive messages for that topic
-	var ch <-chan *ic.InterContainerMessage
-	var err error
-	if ch, err = kp.kafkaClient.Subscribe(&topic); err != nil {
-		//if ch, err = kp.Subscribe(topic); err != nil {
-		logger.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
-		return err
-	}
-
-	kp.defaultRequestHandlerInterface = handler
-	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: handler, ch: ch})
-	// Launch a go routine to receive and process kafka messages
-	go kp.waitForMessages(ch, topic, handler)
-
-	return nil
-}
-
-// SubscribeWithDefaultRequestHandler allows a caller to add a topic to an existing target object to be invoked automatically
-// when a message is received on a given topic.  So far there is only one target registered per microservice.
-func (kp *interContainerProxy) SubscribeWithDefaultRequestHandler(topic Topic, initialOffset int64) error {
-	// Subscribe to receive messages for that topic
-	var ch <-chan *ic.InterContainerMessage
-	var err error
-	if ch, err = kp.kafkaClient.Subscribe(&topic, &KVArg{Key: Offset, Value: initialOffset}); err != nil {
-		logger.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
-		return err
-	}
-	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: kp.defaultRequestHandlerInterface, ch: ch})
-
-	// Launch a go routine to receive and process kafka messages
-	go kp.waitForMessages(ch, topic, kp.defaultRequestHandlerInterface)
-
-	return nil
-}
-
-func (kp *interContainerProxy) UnSubscribeFromRequestHandler(topic Topic) error {
-	return kp.deleteFromTopicRequestHandlerChannelMap(topic.Name)
-}
-
-func (kp *interContainerProxy) deleteFromTopicResponseChannelMap(topic string) error {
-	kp.lockTopicResponseChannelMap.Lock()
-	defer kp.lockTopicResponseChannelMap.Unlock()
-	if _, exist := kp.topicToResponseChannelMap[topic]; exist {
-		// Unsubscribe from this topic first - this will close the subscribed channel
-		var err error
-		if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
-			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic})
-		}
-		delete(kp.topicToResponseChannelMap, topic)
-		return err
-	} else {
-		return fmt.Errorf("%s-Topic-not-found", topic)
-	}
-}
-
-// nolint: unused
-func (kp *interContainerProxy) deleteAllTopicResponseChannelMap() error {
-	logger.Debug("delete-all-topic-response-channel")
-	kp.lockTopicResponseChannelMap.Lock()
-	defer kp.lockTopicResponseChannelMap.Unlock()
-	var unsubscribeFailTopics []string
-	for topic := range kp.topicToResponseChannelMap {
-		// Unsubscribe from this topic first - this will close the subscribed channel
-		if err := kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
-			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
-			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
-			// Do not return. Continue to try to unsubscribe from other topics.
-		} else {
-			// Only delete from channel map if successfully unsubscribed.
-			delete(kp.topicToResponseChannelMap, topic)
-		}
-	}
-	if len(unsubscribeFailTopics) > 0 {
-		return fmt.Errorf("unsubscribe-errors: %v", unsubscribeFailTopics)
-	}
-	return nil
-}
-
-func (kp *interContainerProxy) addToTopicRequestHandlerChannelMap(topic string, arg *requestHandlerChannel) {
-	kp.lockTopicRequestHandlerChannelMap.Lock()
-	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
-	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; !exist {
-		kp.topicToRequestHandlerChannelMap[topic] = arg
-	}
-}
-
-func (kp *interContainerProxy) deleteFromTopicRequestHandlerChannelMap(topic string) error {
-	kp.lockTopicRequestHandlerChannelMap.Lock()
-	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
-	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; exist {
-		// Close the kafka client first by unsubscribing from this topic
-		if err := kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
-			return err
-		}
-		delete(kp.topicToRequestHandlerChannelMap, topic)
-		return nil
-	} else {
-		return fmt.Errorf("%s-Topic-not-found", topic)
-	}
-}
-
-// nolint: unused
-func (kp *interContainerProxy) deleteAllTopicRequestHandlerChannelMap() error {
-	logger.Debug("delete-all-topic-request-channel")
-	kp.lockTopicRequestHandlerChannelMap.Lock()
-	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
-	var unsubscribeFailTopics []string
-	for topic := range kp.topicToRequestHandlerChannelMap {
-		// Close the kafka client first by unsubscribing from this topic
-		if err := kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
-			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
-			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
-			// Do not return. Continue to try to unsubscribe from other topics.
-		} else {
-			// Only delete from channel map if successfully unsubscribed.
-			delete(kp.topicToRequestHandlerChannelMap, topic)
-		}
-	}
-	if len(unsubscribeFailTopics) > 0 {
-		return fmt.Errorf("unsubscribe-errors: %v", unsubscribeFailTopics)
-	}
-	return nil
-}
-
-func (kp *interContainerProxy) addToTransactionIdToChannelMap(id string, topic *Topic, arg chan *ic.InterContainerMessage) {
-	kp.lockTransactionIdToChannelMap.Lock()
-	defer kp.lockTransactionIdToChannelMap.Unlock()
-	if _, exist := kp.transactionIdToChannelMap[id]; !exist {
-		kp.transactionIdToChannelMap[id] = &transactionChannel{topic: topic, ch: arg}
-	}
-}
-
-func (kp *interContainerProxy) deleteFromTransactionIdToChannelMap(id string) {
-	kp.lockTransactionIdToChannelMap.Lock()
-	defer kp.lockTransactionIdToChannelMap.Unlock()
-	if transChannel, exist := kp.transactionIdToChannelMap[id]; exist {
-		// Close the channel first
-		close(transChannel.ch)
-		delete(kp.transactionIdToChannelMap, id)
-	}
-}
-
-func (kp *interContainerProxy) deleteTopicTransactionIdToChannelMap(id string) {
-	kp.lockTransactionIdToChannelMap.Lock()
-	defer kp.lockTransactionIdToChannelMap.Unlock()
-	for key, value := range kp.transactionIdToChannelMap {
-		if value.topic.Name == id {
-			close(value.ch)
-			delete(kp.transactionIdToChannelMap, key)
-		}
-	}
-}
-
-// nolint: unused
-func (kp *interContainerProxy) deleteAllTransactionIdToChannelMap() {
-	logger.Debug("delete-all-transaction-id-channel-map")
-	kp.lockTransactionIdToChannelMap.Lock()
-	defer kp.lockTransactionIdToChannelMap.Unlock()
-	for key, value := range kp.transactionIdToChannelMap {
-		close(value.ch)
-		delete(kp.transactionIdToChannelMap, key)
-	}
-}
-
-func (kp *interContainerProxy) DeleteTopic(topic Topic) error {
-	// If we have any consumers on that topic we need to close them
-	if err := kp.deleteFromTopicResponseChannelMap(topic.Name); err != nil {
-		logger.Errorw("delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
-	}
-	if err := kp.deleteFromTopicRequestHandlerChannelMap(topic.Name); err != nil {
-		logger.Errorw("delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
-	}
-	kp.deleteTopicTransactionIdToChannelMap(topic.Name)
-
-	return kp.kafkaClient.DeleteTopic(&topic)
-}
-
-func encodeReturnedValue(returnedVal interface{}) (*any.Any, error) {
-	// Encode the response argument - needs to be a proto message
-	if returnedVal == nil {
-		return nil, nil
-	}
-	protoValue, ok := returnedVal.(proto.Message)
-	if !ok {
-		logger.Warnw("response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
-		err := errors.New("response-value-not-proto-message")
-		return nil, err
-	}
-
-	// Marshal the returned value, if any
-	var marshalledReturnedVal *any.Any
-	var err error
-	if marshalledReturnedVal, err = ptypes.MarshalAny(protoValue); err != nil {
-		logger.Warnw("cannot-marshal-returned-val", log.Fields{"error": err})
-		return nil, err
-	}
-	return marshalledReturnedVal, nil
-}
-
-func encodeDefaultFailedResponse(request *ic.InterContainerMessage) *ic.InterContainerMessage {
-	responseHeader := &ic.Header{
-		Id:        request.Header.Id,
-		Type:      ic.MessageType_RESPONSE,
-		FromTopic: request.Header.ToTopic,
-		ToTopic:   request.Header.FromTopic,
-		Timestamp: ptypes.TimestampNow(),
-	}
-	responseBody := &ic.InterContainerResponseBody{
-		Success: false,
-		Result:  nil,
-	}
-	var marshalledResponseBody *any.Any
-	var err error
-	// Error should never happen here
-	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
-		logger.Warnw("cannot-marshal-failed-response-body", log.Fields{"error": err})
-	}
-
-	return &ic.InterContainerMessage{
-		Header: responseHeader,
-		Body:   marshalledResponseBody,
-	}
-
-}
-
-//encodeResponse encodes a response to send over kafka and returns an InterContainerMessage on success
-//or an error on failure
-func encodeResponse(request *ic.InterContainerMessage, success bool, returnedValues ...interface{}) (*ic.InterContainerMessage, error) {
-	//logger.Debugw("encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
-	responseHeader := &ic.Header{
-		Id:        request.Header.Id,
-		Type:      ic.MessageType_RESPONSE,
-		FromTopic: request.Header.ToTopic,
-		ToTopic:   request.Header.FromTopic,
-		KeyTopic:  request.Header.KeyTopic,
-		Timestamp: ptypes.TimestampNow(),
-	}
-
-	// Go over all returned values
-	var marshalledReturnedVal *any.Any
-	var err error
-
-	// for now we support only 1 returned value - (excluding the error)
-	if len(returnedValues) > 0 {
-		if marshalledReturnedVal, err = encodeReturnedValue(returnedValues[0]); err != nil {
-			logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
-		}
-	}
-
-	responseBody := &ic.InterContainerResponseBody{
-		Success: success,
-		Result:  marshalledReturnedVal,
-	}
-
-	// Marshal the response body
-	var marshalledResponseBody *any.Any
-	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
-		logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
-		return nil, err
-	}
-
-	return &ic.InterContainerMessage{
-		Header: responseHeader,
-		Body:   marshalledResponseBody,
-	}, nil
-}
-
-func CallFuncByName(myClass interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) {
-	myClassValue := reflect.ValueOf(myClass)
-	// Capitalize the first letter of funcName, since only exported (capitalized)
-	// methods can be invoked from a different package
-	funcName = strings.Title(funcName)
-	m := myClassValue.MethodByName(funcName)
-	if !m.IsValid() {
-		return make([]reflect.Value, 0), fmt.Errorf("method-not-found \"%s\"", funcName)
-	}
-	in := make([]reflect.Value, len(params))
-	for i, param := range params {
-		in[i] = reflect.ValueOf(param)
-	}
-	out = m.Call(in)
-	return
-}
-
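Illustrative sketch (not in the original file) of the reflection dispatch performed by CallFuncByName above: "ping" is capitalized to "Ping" so the exported method can be resolved and invoked by name, just as handleMessage does for incoming RPC names.

type echoHandler struct{}

func (e *echoHandler) Ping(msg *ic.StrType) (*ic.StrType, error) {
	return &ic.StrType{Val: "pong: " + msg.Val}, nil
}

func reflectionDemo() (string, error) {
	out, err := CallFuncByName(&echoHandler{}, "ping", &ic.StrType{Val: "hello"})
	if err != nil {
		return "", err
	}
	// out mirrors the method's return values: [*ic.StrType, error]
	if e, ok := out[1].Interface().(error); ok && e != nil {
		return "", e
	}
	return out[0].Interface().(*ic.StrType).Val, nil // "pong: hello"
}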
-func (kp *interContainerProxy) addTransactionId(transactionId string, currentArgs []*ic.Argument) []*ic.Argument {
-	arg := &KVArg{
-		Key:   TransactionKey,
-		Value: &ic.StrType{Val: transactionId},
-	}
-
-	var marshalledArg *any.Any
-	var err error
-	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: transactionId}); err != nil {
-		logger.Warnw("cannot-add-transactionId", log.Fields{"error": err})
-		return currentArgs
-	}
-	protoArg := &ic.Argument{
-		Key:   arg.Key,
-		Value: marshalledArg,
-	}
-	return append(currentArgs, protoArg)
-}
-
-func (kp *interContainerProxy) addFromTopic(fromTopic string, currentArgs []*ic.Argument) []*ic.Argument {
-	var marshalledArg *any.Any
-	var err error
-	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: fromTopic}); err != nil {
-		logger.Warnw("cannot-add-fromTopic", log.Fields{"error": err})
-		return currentArgs
-	}
-	protoArg := &ic.Argument{
-		Key:   FromTopic,
-		Value: marshalledArg,
-	}
-	return append(currentArgs, protoArg)
-}
-
-func (kp *interContainerProxy) handleMessage(msg *ic.InterContainerMessage, targetInterface interface{}) {
-
-	// First extract the header to know whether this is a request - responses are handled by a different handler
-	if msg.Header.Type == ic.MessageType_REQUEST {
-		var out []reflect.Value
-		var err error
-
-		// Get the request body
-		requestBody := &ic.InterContainerRequestBody{}
-		if err = ptypes.UnmarshalAny(msg.Body, requestBody); err != nil {
-			logger.Warnw("cannot-unmarshal-request", log.Fields{"error": err})
-		} else {
-			logger.Debugw("received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
-			// let the callee unpack the arguments as it's the only one that knows the real proto type
-			// Augment the requestBody with the message Id as it will be used in scenarios where cores
-			// are set in pairs and competing
-			requestBody.Args = kp.addTransactionId(msg.Header.Id, requestBody.Args)
-
-			// Augment the requestBody with the From topic name as it will be used in scenarios where a container
-			// needs to send an unsolicited message to the requesting container
-			requestBody.Args = kp.addFromTopic(msg.Header.FromTopic, requestBody.Args)
-
-			out, err = CallFuncByName(targetInterface, requestBody.Rpc, requestBody.Args)
-			if err != nil {
-				logger.Warn(err)
-			}
-		}
-		// Response required?
-		if requestBody.ResponseRequired {
-			// If we already have an error before then just return that
-			var returnError *ic.Error
-			var returnedValues []interface{}
-			var success bool
-			if err != nil {
-				returnError = &ic.Error{Reason: err.Error()}
-				returnedValues = make([]interface{}, 1)
-				returnedValues[0] = returnError
-			} else {
-				returnedValues = make([]interface{}, 0)
-				// Check for errors first
-				lastIndex := len(out) - 1
-				if out[lastIndex].Interface() != nil { // Error
-					if retError, ok := out[lastIndex].Interface().(error); ok {
-						if retError.Error() == ErrorTransactionNotAcquired.Error() {
-							logger.Debugw("Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
-							return // Ignore - process is in competing mode and ignored transaction
-						}
-						returnError = &ic.Error{Reason: retError.Error()}
-						returnedValues = append(returnedValues, returnError)
-					} else { // Should never happen
-						returnError = &ic.Error{Reason: "incorrect-error-returns"}
-						returnedValues = append(returnedValues, returnError)
-					}
-				} else if len(out) == 2 && reflect.ValueOf(out[0].Interface()).IsValid() && reflect.ValueOf(out[0].Interface()).IsNil() {
-					logger.Warnw("Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
-					return // Ignore - should not happen
-				} else { // Non-error case
-					success = true
-					for idx, val := range out {
-						//logger.Debugw("returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
-						if idx != lastIndex {
-							returnedValues = append(returnedValues, val.Interface())
-						}
-					}
-				}
-			}
-
-			var icm *ic.InterContainerMessage
-			if icm, err = encodeResponse(msg, success, returnedValues...); err != nil {
-				logger.Warnw("error-encoding-response-returning-failure-result", log.Fields{"error": err})
-				icm = encodeDefaultFailedResponse(msg)
-			}
-			// To preserve message ordering, all messages for a given topic are sent to the same partition
-			// by providing a message key (carried in the header's KeyTopic field).  If the deviceId is not
-			// present the key will be empty, and messages for the topic may then be spread across
-			// partitions.
-			replyTopic := &Topic{Name: msg.Header.FromTopic}
-			key := msg.Header.KeyTopic
-			logger.Debugw("sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
-			// TODO: handle error response.
-			go func() {
-				if err := kp.kafkaClient.Send(icm, replyTopic, key); err != nil {
-					logger.Errorw("send-reply-failed", log.Fields{
-						"topic": replyTopic,
-						"key":   key,
-						"error": err})
-				}
-			}()
-		}
-	} else if msg.Header.Type == ic.MessageType_RESPONSE {
-		logger.Debugw("response-received", log.Fields{"msg-header": msg.Header})
-		go kp.dispatchResponse(msg)
-	} else {
-		logger.Warnw("unsupported-message-received", log.Fields{"msg-header": msg.Header})
-	}
-}
-
-func (kp *interContainerProxy) waitForMessages(ch <-chan *ic.InterContainerMessage, topic Topic, targetInterface interface{}) {
-	//	Wait for messages
-	for msg := range ch {
-		//logger.Debugw("request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
-		go kp.handleMessage(msg, targetInterface)
-	}
-}
-
-func (kp *interContainerProxy) dispatchResponse(msg *ic.InterContainerMessage) {
-	kp.lockTransactionIdToChannelMap.RLock()
-	defer kp.lockTransactionIdToChannelMap.RUnlock()
-	if _, exist := kp.transactionIdToChannelMap[msg.Header.Id]; !exist {
-		logger.Debugw("no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
-		return
-	}
-	kp.transactionIdToChannelMap[msg.Header.Id].ch <- msg
-}
-
-// subscribeForResponse allows a caller to subscribe to a given topic when waiting for a response.
-// This method is built to prevent all subscribers from receiving all messages, as is the case with the
-// Subscribe API. A single response channel waits for kafka messages and dispatches each message to the
-// corresponding waiting channel.
-func (kp *interContainerProxy) subscribeForResponse(topic Topic, trnsId string) (chan *ic.InterContainerMessage, error) {
-	logger.Debugw("subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
-
-	// Create a dedicated channel for this consumer.  We cannot use the channel from the kafka client as it
-	// would broadcast any message for this topic to all channels waiting on it.
-	// Set channel size to 1 to prevent deadlock, see VOL-2708
-	ch := make(chan *ic.InterContainerMessage, 1)
-	kp.addToTransactionIdToChannelMap(trnsId, &topic, ch)
-
-	return ch, nil
-}
-
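The dispatchResponse/subscribeForResponse pair above avoids the fan-out of the generic Subscribe API: one consumer reads the reply topic, and each in-flight transaction gets its own buffered channel. A self-contained sketch of that pattern (all names here are illustrative, not library API):

    package main

    import (
    	"fmt"
    	"sync"
    )

    // dispatcher routes each response to the single waiter registered
    // under its transaction id instead of broadcasting to all waiters.
    type dispatcher struct {
    	mu      sync.RWMutex
    	waiters map[string]chan string
    }

    func (d *dispatcher) subscribe(txID string) chan string {
    	ch := make(chan string, 1) // buffered so the dispatcher never blocks (cf. VOL-2708)
    	d.mu.Lock()
    	defer d.mu.Unlock()
    	d.waiters[txID] = ch
    	return ch
    }

    func (d *dispatcher) dispatch(txID, msg string) {
    	d.mu.RLock()
    	defer d.mu.RUnlock()
    	if ch, ok := d.waiters[txID]; ok {
    		ch <- msg
    	}
    }

    func main() {
    	d := &dispatcher{waiters: make(map[string]chan string)}
    	ch := d.subscribe("tx-1")
    	d.dispatch("tx-1", "response")
    	fmt.Println(<-ch) // response
    }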
-func (kp *interContainerProxy) unSubscribeForResponse(trnsId string) error {
-	logger.Debugw("unsubscribe-for-response", log.Fields{"trnsId": trnsId})
-	kp.deleteFromTransactionIdToChannelMap(trnsId)
-	return nil
-}
-
-func (kp *interContainerProxy) EnableLivenessChannel(enable bool) chan bool {
-	return kp.kafkaClient.EnableLivenessChannel(enable)
-}
-
-func (kp *interContainerProxy) EnableHealthinessChannel(enable bool) chan bool {
-	return kp.kafkaClient.EnableHealthinessChannel(enable)
-}
-
-func (kp *interContainerProxy) SendLiveness() error {
-	return kp.kafkaClient.SendLiveness()
-}
-
-// encodeRequest formats a request to send over kafka and returns an InterContainerMessage on success
-// or an error on failure
-func encodeRequest(rpc string, toTopic *Topic, replyTopic *Topic, key string, kvArgs ...*KVArg) (*ic.InterContainerMessage, error) {
-	requestHeader := &ic.Header{
-		Id:        uuid.New().String(),
-		Type:      ic.MessageType_REQUEST,
-		FromTopic: replyTopic.Name,
-		ToTopic:   toTopic.Name,
-		KeyTopic:  key,
-		Timestamp: ptypes.TimestampNow(),
-	}
-	requestBody := &ic.InterContainerRequestBody{
-		Rpc:              rpc,
-		ResponseRequired: true,
-		ReplyToTopic:     replyTopic.Name,
-	}
-
-	for _, arg := range kvArgs {
-		if arg == nil {
-			// In case the caller sends an array with empty args
-			continue
-		}
-		var marshalledArg *any.Any
-		var err error
-		// ascertain the value interface type is a proto.Message
-		protoValue, ok := arg.Value.(proto.Message)
-		if !ok {
-			logger.Warnw("argument-value-not-proto-message", log.Fields{"value": arg.Value})
-			err := errors.New("argument-value-not-proto-message")
-			return nil, err
-		}
-		if marshalledArg, err = ptypes.MarshalAny(protoValue); err != nil {
-			logger.Warnw("cannot-marshal-request", log.Fields{"error": err})
-			return nil, err
-		}
-		protoArg := &ic.Argument{
-			Key:   arg.Key,
-			Value: marshalledArg,
-		}
-		requestBody.Args = append(requestBody.Args, protoArg)
-	}
-
-	var marshalledData *any.Any
-	var err error
-	if marshalledData, err = ptypes.MarshalAny(requestBody); err != nil {
-		logger.Warnw("cannot-marshal-request", log.Fields{"error": err})
-		return nil, err
-	}
-	request := &ic.InterContainerMessage{
-		Header: requestHeader,
-		Body:   marshalledData,
-	}
-	return request, nil
-}
-
-func decodeResponse(response *ic.InterContainerMessage) (*ic.InterContainerResponseBody, error) {
-	//	Extract the message body
-	responseBody := ic.InterContainerResponseBody{}
-	if err := ptypes.UnmarshalAny(response.Body, &responseBody); err != nil {
-		logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
-		return nil, err
-	}
-	//logger.Debugw("response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
-
-	return &responseBody, nil
-
-}
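Every argument and response in this proxy travels as a protobuf Any. A minimal sketch of the wrap/unwrap round trip performed by encodeRequest and the callee, using the same StrType/Argument types; the import path assumes the v4 protos this change moves to:

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/ptypes"
    	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
    )

    func main() {
    	// Wrap a typed proto value into an Any, as encodeRequest does per argument.
    	anyVal, err := ptypes.MarshalAny(&ic.StrType{Val: "openolt"})
    	if err != nil {
    		panic(err)
    	}
    	arg := &ic.Argument{Key: "fromTopic", Value: anyVal}

    	// Unwrap on the receiving side; only the callee knows the concrete type.
    	out := &ic.StrType{}
    	if err := ptypes.UnmarshalAny(arg.Value, out); err != nil {
    		panic(err)
    	}
    	fmt.Println(arg.Key, out.Val) // fromTopic openolt
    }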
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log_classic.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log_classic.go
deleted file mode 100644
index b47b562..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log_classic.go
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Older version of the Logger interface, without support for context injection.
-// This is deprecated and should not be used anymore; use the CLogger defined
-// in log.go instead.
-// This file will be deleted once all code files of the voltha components have
-// been changed to use the new CLogger interface methods supporting context injection.
-package log
-
-import (
-	zp "go.uber.org/zap"
-)
-
-// Logger represents an abstract logging interface.  Any logging implementation used
-// will need to abide by this interface
-type Logger interface {
-	Debug(...interface{})
-	Debugln(...interface{})
-	Debugf(string, ...interface{})
-	Debugw(string, Fields)
-
-	Info(...interface{})
-	Infoln(...interface{})
-	Infof(string, ...interface{})
-	Infow(string, Fields)
-
-	Warn(...interface{})
-	Warnln(...interface{})
-	Warnf(string, ...interface{})
-	Warnw(string, Fields)
-
-	Error(...interface{})
-	Errorln(...interface{})
-	Errorf(string, ...interface{})
-	Errorw(string, Fields)
-
-	Fatal(...interface{})
-	Fatalln(...interface{})
-	Fatalf(string, ...interface{})
-	Fatalw(string, Fields)
-
-	With(Fields) Logger
-
-	// The following are added to be able to use this logger as a gRPC LoggerV2 if needed
-	//
-	Warning(...interface{})
-	Warningln(...interface{})
-	Warningf(string, ...interface{})
-
-	// V reports whether verbosity level l is at least the requested verbose level.
-	V(l LogLevel) bool
-
-	// GetLogLevel returns the log level of this specific logger
-	GetLogLevel() LogLevel
-}
-
-// logger has been refactored into a thin wrapper over the clogger implementation to support
-// all existing log statements during the transition to the new clogger
-type logger struct {
-	cl *clogger
-}
-
-func AddPackage(outputType string, level LogLevel, defaultFields Fields, pkgNames ...string) (Logger, error) {
-	// Get package name of caller method and pass further on; else this method is considered caller
-	pkgName, _, _, _ := getCallerInfo()
-
-	pkgNames = append(pkgNames, pkgName)
-	clg, err := RegisterPackage(outputType, level, defaultFields, pkgNames...)
-	if err != nil {
-		return nil, err
-	}
-
-	return logger{cl: clg.(*clogger)}, nil
-}
-
-func getPackageLevelSugaredLogger() *zp.SugaredLogger {
-	pkgName, _, _, _ := getCallerInfo()
-	if _, exist := loggers[pkgName]; exist {
-		return loggers[pkgName].log
-	}
-	return defaultLogger.log
-}
-
-func getPackageLevelLogger() CLogger {
-	pkgName, _, _, _ := getCallerInfo()
-	if _, exist := loggers[pkgName]; exist {
-		return loggers[pkgName]
-	}
-	return defaultLogger
-}
-
-// With returns a logger initialized with the key-value pairs
-func (l logger) With(keysAndValues Fields) Logger {
-	return logger{cl: &clogger{log: l.cl.log.With(serializeMap(keysAndValues)...), parent: l.cl.parent}}
-}
-
-// Debug logs a message at level Debug on the standard logger.
-func (l logger) Debug(args ...interface{}) {
-	l.cl.log.Debug(args...)
-}
-
-// Debugln logs a message at level Debug on the standard logger with a line feed; the logger appends one by default in any case.
-func (l logger) Debugln(args ...interface{}) {
-	l.cl.log.Debug(args...)
-}
-
-// Debugf logs a formatted message at level Debug on the standard logger.
-func (l logger) Debugf(format string, args ...interface{}) {
-	l.cl.log.Debugf(format, args...)
-}
-
-// Debugw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (l logger) Debugw(msg string, keysAndValues Fields) {
-	if l.V(DebugLevel) {
-		l.cl.log.Debugw(msg, serializeMap(keysAndValues)...)
-	}
-}
-
-// Info logs a message at level Info on the standard logger.
-func (l logger) Info(args ...interface{}) {
-	l.cl.log.Info(args...)
-}
-
-// Infoln logs a message at level Info on the standard logger with a line feed; the logger appends one by default in any case.
-func (l logger) Infoln(args ...interface{}) {
-	l.cl.log.Info(args...)
-	//msg := fmt.Sprintln(args...)
-	//l.sourced().Info(msg[:len(msg)-1])
-}
-
-// Infof logs a message at level Info on the standard logger.
-func (l logger) Infof(format string, args ...interface{}) {
-	l.cl.log.Infof(format, args...)
-}
-
-// Infow logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (l logger) Infow(msg string, keysAndValues Fields) {
-	if l.V(InfoLevel) {
-		l.cl.log.Infow(msg, serializeMap(keysAndValues)...)
-	}
-}
-
-// Warn logs a message at level Warn on the standard logger.
-func (l logger) Warn(args ...interface{}) {
-	l.cl.log.Warn(args...)
-}
-
-// Warnln logs a message at level Warn on the standard logger with a line feed; the logger appends one by default in any case.
-func (l logger) Warnln(args ...interface{}) {
-	l.cl.log.Warn(args...)
-}
-
-// Warnf logs a message at level Warn on the standard logger.
-func (l logger) Warnf(format string, args ...interface{}) {
-	l.cl.log.Warnf(format, args...)
-}
-
-// Warnw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (l logger) Warnw(msg string, keysAndValues Fields) {
-	if l.V(WarnLevel) {
-		l.cl.log.Warnw(msg, serializeMap(keysAndValues)...)
-	}
-}
-
-// Error logs a message at level Error on the standard logger.
-func (l logger) Error(args ...interface{}) {
-	l.cl.log.Error(args...)
-}
-
-// Errorln logs a message at level Error on the standard logger with a line feed; the logger appends one by default in any case.
-func (l logger) Errorln(args ...interface{}) {
-	l.cl.log.Error(args...)
-}
-
-// Errorf logs a message at level Error on the standard logger.
-func (l logger) Errorf(format string, args ...interface{}) {
-	l.cl.log.Errorf(format, args...)
-}
-
-// Errorw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (l logger) Errorw(msg string, keysAndValues Fields) {
-	if l.V(ErrorLevel) {
-		l.cl.log.Errorw(msg, serializeMap(keysAndValues)...)
-	}
-}
-
-// Fatal logs a message at level Fatal on the standard logger.
-func (l logger) Fatal(args ...interface{}) {
-	l.cl.log.Fatal(args...)
-}
-
-// Fatalln logs a message at level Fatal on the standard logger with a line feed; the logger appends one by default in any case.
-func (l logger) Fatalln(args ...interface{}) {
-	l.cl.log.Fatal(args...)
-}
-
-// Fatalf logs a message at level Fatal on the standard logger.
-func (l logger) Fatalf(format string, args ...interface{}) {
-	l.cl.log.Fatalf(format, args...)
-}
-
-// Fatalw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func (l logger) Fatalw(msg string, keysAndValues Fields) {
-	if l.V(FatalLevel) {
-		l.cl.log.Fatalw(msg, serializeMap(keysAndValues)...)
-	}
-}
-
-// Warning logs a message at level Warn on the standard logger.
-func (l logger) Warning(args ...interface{}) {
-	l.cl.log.Warn(args...)
-}
-
-// Warningln logs a message at level Warn on the standard logger with a line feed; the logger appends one by default in any case.
-func (l logger) Warningln(args ...interface{}) {
-	l.cl.log.Warn(args...)
-}
-
-// Warningf logs a message at level Warn on the standard logger.
-func (l logger) Warningf(format string, args ...interface{}) {
-	l.cl.log.Warnf(format, args...)
-}
-
-// V reports whether verbosity level l is at least the requested verbose level.
-func (l logger) V(level LogLevel) bool {
-	return l.cl.parent.Core().Enabled(logLevelToLevel(level))
-}
-
-// GetLogLevel returns the current level of the logger
-func (l logger) GetLogLevel() LogLevel {
-	return levelToLogLevel(cfgs[l.cl.packageName].Level.Level())
-}
-
-// With returns a logger initialized with the key-value pairs
-func With(keysAndValues Fields) Logger {
-	return logger{cl: &clogger{log: getPackageLevelSugaredLogger().With(serializeMap(keysAndValues)...), parent: defaultLogger.parent}}
-}
-
-// Debug logs a message at level Debug on the standard logger.
-func Debug(args ...interface{}) {
-	getPackageLevelSugaredLogger().Debug(args...)
-}
-
-// Debugln logs a message at level Debug on the standard logger.
-func Debugln(args ...interface{}) {
-	getPackageLevelSugaredLogger().Debug(args...)
-}
-
-// Debugf logs a message at level Debug on the standard logger.
-func Debugf(format string, args ...interface{}) {
-	getPackageLevelSugaredLogger().Debugf(format, args...)
-}
-
-// Debugw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func Debugw(msg string, keysAndValues Fields) {
-	getPackageLevelSugaredLogger().Debugw(msg, serializeMap(keysAndValues)...)
-}
-
-// Info logs a message at level Info on the standard logger.
-func Info(args ...interface{}) {
-	getPackageLevelSugaredLogger().Info(args...)
-}
-
-// Infoln logs a message at level Info on the standard logger.
-func Infoln(args ...interface{}) {
-	getPackageLevelSugaredLogger().Info(args...)
-}
-
-// Infof logs a message at level Info on the standard logger.
-func Infof(format string, args ...interface{}) {
-	getPackageLevelSugaredLogger().Infof(format, args...)
-}
-
-// Infow logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func Infow(msg string, keysAndValues Fields) {
-	getPackageLevelSugaredLogger().Infow(msg, serializeMap(keysAndValues)...)
-}
-
-// Warn logs a message at level Warn on the standard logger.
-func Warn(args ...interface{}) {
-	getPackageLevelSugaredLogger().Warn(args...)
-}
-
-// Warnln logs a message at level Warn on the standard logger.
-func Warnln(args ...interface{}) {
-	getPackageLevelSugaredLogger().Warn(args...)
-}
-
-// Warnf logs a message at level Warn on the standard logger.
-func Warnf(format string, args ...interface{}) {
-	getPackageLevelSugaredLogger().Warnf(format, args...)
-}
-
-// Warnw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func Warnw(msg string, keysAndValues Fields) {
-	getPackageLevelSugaredLogger().Warnw(msg, serializeMap(keysAndValues)...)
-}
-
-// Error logs a message at level Error on the standard logger.
-func Error(args ...interface{}) {
-	getPackageLevelSugaredLogger().Error(args...)
-}
-
-// Errorln logs a message at level Error on the standard logger.
-func Errorln(args ...interface{}) {
-	getPackageLevelSugaredLogger().Error(args...)
-}
-
-// Errorf logs a message at level Error on the standard logger.
-func Errorf(format string, args ...interface{}) {
-	getPackageLevelSugaredLogger().Errorf(format, args...)
-}
-
-// Errorw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func Errorw(msg string, keysAndValues Fields) {
-	getPackageLevelSugaredLogger().Errorw(msg, serializeMap(keysAndValues)...)
-}
-
-// Fatal logs a message at level Fatal on the standard logger.
-func Fatal(args ...interface{}) {
-	getPackageLevelSugaredLogger().Fatal(args...)
-}
-
-// Fatalln logs a message at level Fatal on the standard logger.
-func Fatalln(args ...interface{}) {
-	getPackageLevelSugaredLogger().Fatal(args...)
-}
-
-// Fatalf logs a message at level Fatal on the standard logger.
-func Fatalf(format string, args ...interface{}) {
-	getPackageLevelSugaredLogger().Fatalf(format, args...)
-}
-
-// Fatalw logs a message with some additional context. The variadic key-value
-// pairs are treated as they are in With.
-func Fatalw(msg string, keysAndValues Fields) {
-	getPackageLevelSugaredLogger().Fatalw(msg, serializeMap(keysAndValues)...)
-}
-
-// Warning logs a message at level Warn on the standard logger.
-func Warning(args ...interface{}) {
-	getPackageLevelSugaredLogger().Warn(args...)
-}
-
-// Warningln logs a message at level Warn on the standard logger.
-func Warningln(args ...interface{}) {
-	getPackageLevelSugaredLogger().Warn(args...)
-}
-
-// Warningf logs a message at level Warn on the standard logger.
-func Warningf(format string, args ...interface{}) {
-	getPackageLevelSugaredLogger().Warnf(format, args...)
-}
-
-// V reports whether verbosity level l is at least the requested verbose level.
-func V(level LogLevel) bool {
-	return getPackageLevelLogger().V(level)
-}
-
-// GetLogLevel returns the log level of the invoking package
-func GetLogLevel() LogLevel {
-	return getPackageLevelLogger().GetLogLevel()
-}
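This whole file was a transition shim; the intended replacement is the context-aware CLogger. A hedged before/after sketch of the migration this change performs across the vendored packages (the package body and message names are illustrative):

    package example

    import (
    	"context"

    	"github.com/opencord/voltha-lib-go/v4/pkg/log"
    )

    var logger log.CLogger

    func init() {
    	// v4 style: register the package once, then log with a context so
    	// tracing spans can be correlated (see utils.go below).
    	var err error
    	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
    	if err != nil {
    		panic(err)
    	}
    }

    func doWork(ctx context.Context, deviceID string) {
    	// Old: logger.Debugw("doing-work", log.Fields{"device-id": deviceID})
    	// New: the context is passed with every statement.
    	logger.Debugw(ctx, "doing-work", log.Fields{"device-id": deviceID})
    }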
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/utils.go
deleted file mode 100644
index 1869b1a..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/utils.go
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// File contains utility functions to support Open Tracing in conjunction with
-// Enhanced Logging based on context propagation
-
-package log
-
-import (
-	"context"
-	"errors"
-	"github.com/opentracing/opentracing-go"
-	jtracing "github.com/uber/jaeger-client-go"
-	jcfg "github.com/uber/jaeger-client-go/config"
-	jmetrics "github.com/uber/jaeger-lib/metrics"
-	"io"
-	"os"
-)
-
-// StartTracing starts tracing for a component, using the component name injected from the Helm chart.
-// The Close() method on the returned Closer instance should be deferred to gracefully
-// terminate tracing on component shutdown.
-func StartTracing() (io.Closer, error) {
-	componentName := os.Getenv("COMPONENT_NAME")
-	if componentName == "" {
-		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
-	}
-
-	// Use basic configuration to start with; will extend later to support dynamic config updates
-	cfg := jcfg.Configuration{}
-
-	jLoggerCfgOption := jcfg.Logger(jtracing.StdLogger)
-	jMetricsFactoryCfgOption := jcfg.Metrics(jmetrics.NullFactory)
-
-	return cfg.InitGlobalTracer(componentName, jLoggerCfgOption, jMetricsFactoryCfgOption)
-}
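A hedged sketch of how a component wires this up at startup; the import path matches this (v3) file, and COMPONENT_NAME is normally injected by the Helm chart rather than set in code:

    package main

    import (
    	"os"

    	"github.com/opencord/voltha-lib-go/v3/pkg/log"
    )

    func main() {
    	os.Setenv("COMPONENT_NAME", "example-adapter") // stand-in for the chart-injected value
    	closer, err := log.StartTracing()
    	if err != nil {
    		panic(err)
    	}
    	defer closer.Close() // gracefully terminate tracing on shutdown
    	// ... component runtime ...
    }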
-
-// ExtractContextAttributes extracts details of the execution context, as log fields, from the tracing
-// span injected into the context instance. The following log fields are extracted:
-// 1. Operation Name : key 'op-name', value the span's operation name
-// 2. Operation Id : key 'op-id', value the 64-bit span id as a hex-digit string
-//
-// Additionally, any tags present in the span are also extracted as log fields, e.g. device-id.
-//
-// If no span is associated with the context, an empty slice is returned without any log fields.
-func ExtractContextAttributes(ctx context.Context) []interface{} {
-	attrMap := make(map[string]interface{})
-
-	if ctx != nil {
-		if span := opentracing.SpanFromContext(ctx); span != nil {
-			if jspan, ok := span.(*jtracing.Span); ok {
-				opname := jspan.OperationName()
-				spanid := jspan.SpanContext().SpanID().String()
-
-				attrMap["op-id"] = spanid
-				attrMap["op-name"] = opname
-
-				for k, v := range jspan.Tags() {
-					attrMap[k] = v
-				}
-			}
-		}
-	}
-
-	return serializeMap(attrMap)
-}
-
-// serializeMap converts log Fields into the flat key/value slice expected by zap's sugared logger methods
-func serializeMap(fields Fields) []interface{} {
-	data := make([]interface{}, len(fields)*2)
-	i := 0
-	for k, v := range fields {
-		data[i] = k
-		data[i+1] = v
-		i = i + 2
-	}
-	return data
-}
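For clarity, serializeMap flattens a map of n fields into the 2n-element key/value slice that zap's sugared "...w" methods expect. A standalone sketch with a local Fields type:

    package main

    import "fmt"

    type Fields map[string]interface{}

    func serializeMap(fields Fields) []interface{} {
    	data := make([]interface{}, len(fields)*2)
    	i := 0
    	for k, v := range fields {
    		data[i] = k
    		data[i+1] = v
    		i += 2
    	}
    	return data
    }

    func main() {
    	fmt.Println(serializeMap(Fields{"op-name": "activate", "op-id": "1a2b"}))
    	// e.g. [op-name activate op-id 1a2b] (map iteration order is not guaranteed)
    }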
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/adapter_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/adapter_proxy_if.go
similarity index 93%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/adapter_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/adapter_proxy_if.go
index de5cfc0..30fcead 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/adapter_proxy_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/adapter_proxy_if.go
@@ -20,7 +20,7 @@
 	"context"
 
 	"github.com/golang/protobuf/proto"
-	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 )
 
 // AdapterProxy interface for AdapterProxy implementation.
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/core_proxy_if.go
similarity index 86%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/core_proxy_if.go
index 9636a7d..36939bd 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/core_proxy_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/core_proxy_if.go
@@ -18,21 +18,21 @@
 
 import (
 	"context"
-	"github.com/opencord/voltha-protos/v3/go/voltha"
+
+	"github.com/opencord/voltha-protos/v4/go/voltha"
 )
 
 // CoreProxy interface for voltha-go coreproxy.
 type CoreProxy interface {
 	UpdateCoreReference(deviceID string, coreReference string)
 	DeleteCoreReference(deviceID string)
-	// getCoreTopic(deviceID string) kafka.Topic
-	//GetAdapterTopic(args ...string) kafka.Topic
-	// getAdapterTopic(args ...string) kafka.Topic
 	RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error
 	DeviceUpdate(ctx context.Context, device *voltha.Device) error
 	PortCreated(ctx context.Context, deviceID string, port *voltha.Port) error
-	PortsStateUpdate(ctx context.Context, deviceID string, operStatus voltha.OperStatus_Types) error
+	PortsStateUpdate(ctx context.Context, deviceID string, portTypeFilter uint32, operStatus voltha.OperStatus_Types) error
 	DeleteAllPorts(ctx context.Context, deviceID string) error
+	GetDevicePort(ctx context.Context, deviceID string, portNo uint32) (*voltha.Port, error)
+	ListDevicePorts(ctx context.Context, deviceID string) ([]*voltha.Port, error)
 	DeviceStateUpdate(ctx context.Context, deviceID string,
 		connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error
 
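The v4 interface adds direct port queries (GetDevicePort, ListDevicePorts) alongside the new portTypeFilter argument on PortsStateUpdate. A hedged sketch of how an adapter might use them; dumpPorts and the port number are hypothetical:

    package example

    import (
    	"context"

    	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
    )

    // dumpPorts is a hypothetical helper exercising the new v4 port queries.
    func dumpPorts(ctx context.Context, cp adapterif.CoreProxy, deviceID string) error {
    	ports, err := cp.ListDevicePorts(ctx, deviceID)
    	if err != nil {
    		return err
    	}
    	for _, p := range ports {
    		_ = p.PortNo // e.g. feed into flow decomposition
    	}
    	// A single port can also be fetched by number.
    	_, err = cp.GetDevicePort(ctx, deviceID, 1)
    	return err
    }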
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/events_proxy_if.go
similarity index 79%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/events_proxy_if.go
index c144935..7d8a053 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif/events_proxy_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif/events_proxy_if.go
@@ -17,14 +17,15 @@
 package adapterif
 
 import (
-	"github.com/opencord/voltha-protos/v3/go/voltha"
+	"context"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
 )
 
 // EventProxy interface for eventproxy
 type EventProxy interface {
-	SendDeviceEvent(deviceEvent *voltha.DeviceEvent, category EventCategory,
+	SendDeviceEvent(ctx context.Context, deviceEvent *voltha.DeviceEvent, category EventCategory,
 		subCategory EventSubCategory, raisedTs int64) error
-	SendKpiEvent(id string, deviceEvent *voltha.KpiEvent2, category EventCategory,
+	SendKpiEvent(ctx context.Context, id string, deviceEvent *voltha.KpiEvent2, category EventCategory,
 		subCategory EventSubCategory, raisedTs int64) error
 }
 
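Both event methods now take a context as their first argument so spans can follow the event. A hedged wrapper showing the new shape; notify is hypothetical, and the raisedTs unit (epoch seconds here) is an assumption that follows each adapter's convention:

    package example

    import (
    	"context"
    	"time"

    	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
    	"github.com/opencord/voltha-protos/v4/go/voltha"
    )

    // notify is a hypothetical wrapper around the context-first v4 signature.
    func notify(ctx context.Context, ep adapterif.EventProxy, ev *voltha.DeviceEvent,
    	cat adapterif.EventCategory, sub adapterif.EventSubCategory) error {
    	return ep.SendDeviceEvent(ctx, ev, cat, sub, time.Now().Unix()) // raisedTs: assumed epoch seconds
    }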
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/adapter_proxy.go
similarity index 69%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/adapter_proxy.go
index cd5750f..9ade0d1 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/adapter_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/adapter_proxy.go
@@ -17,32 +17,30 @@
 
 import (
 	"context"
-	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+	"github.com/opencord/voltha-lib-go/v4/pkg/db"
 
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/any"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 )
 
 type AdapterProxy struct {
 	kafkaICProxy kafka.InterContainerProxy
-	adapterTopic string
 	coreTopic    string
 	endpointMgr  kafka.EndpointManager
 }
 
-func NewAdapterProxy(kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string, backend *db.Backend) *AdapterProxy {
+func NewAdapterProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, coreTopic string, backend *db.Backend) *AdapterProxy {
 	proxy := AdapterProxy{
 		kafkaICProxy: kafkaProxy,
-		adapterTopic: adapterTopic,
 		coreTopic:    coreTopic,
 		endpointMgr:  kafka.NewEndpointManager(backend),
 	}
-	logger.Debugw("topics", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+	logger.Debugw(ctx, "topics", log.Fields{"core": proxy.coreTopic})
 	return &proxy
 }
 
@@ -54,14 +52,20 @@
 	toDeviceId string,
 	proxyDeviceId string,
 	messageId string) error {
-	logger.Debugw("sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
+	logger.Debugw(ctx, "sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
 		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
 
 	//Marshal the message
 	var marshalledMsg *any.Any
 	var err error
 	if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
-		logger.Warnw("cannot-marshal-msg", log.Fields{"error": err})
+		logger.Warnw(ctx, "cannot-marshal-msg", log.Fields{"error": err})
+		return err
+	}
+
+	// Set up the required rpc arguments
+	endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
+	if err != nil {
 		return err
 	}
 
@@ -69,7 +73,7 @@
 	header := &ic.InterAdapterHeader{
 		Type:          msgType,
 		FromTopic:     fromAdapter,
-		ToTopic:       toAdapter,
+		ToTopic:       string(endpoint),
 		ToDeviceId:    toDeviceId,
 		ProxyDeviceId: proxyDeviceId,
 	}
@@ -89,16 +93,13 @@
 		Value: iaMsg,
 	}
 
-	// Set up the required rpc arguments
-	endpoint, err := ap.endpointMgr.GetEndpoint(toDeviceId, toAdapter)
-	if err != nil {
-		return err
-	}
 	topic := kafka.Topic{Name: string(endpoint)}
 	replyToTopic := kafka.Topic{Name: fromAdapter}
 	rpc := "process_inter_adapter_message"
 
+	// Add an indication in the context to differentiate this inter-adapter message during span processing in the Kafka IC proxy
+	ctx = context.WithValue(ctx, "inter-adapter-msg-type", msgType)
 	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
-	logger.Debugw("inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
-	return unPackResponse(rpc, "", success, result)
+	logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(ctx, rpc, "", success, result)
 }
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/common.go
similarity index 83%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/common.go
index 95a036d..5d7d7f8 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/common.go
@@ -16,15 +16,15 @@
 package common
 
 import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "common"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/core_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/core_proxy.go
new file mode 100644
index 0000000..1077226
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/core_proxy.go
@@ -0,0 +1,689 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package common
+
+import (
+	"context"
+	"sync"
+
+	"github.com/golang/protobuf/ptypes"
+	a "github.com/golang/protobuf/ptypes/any"
+	"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+type CoreProxy struct {
+	kafkaICProxy        kafka.InterContainerProxy
+	adapterTopic        string
+	coreTopic           string
+	deviceIdCoreMap     map[string]string
+	lockDeviceIdCoreMap sync.RWMutex
+}
+
+func NewCoreProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string) *CoreProxy {
+	var proxy CoreProxy
+	proxy.kafkaICProxy = kafkaProxy
+	proxy.adapterTopic = adapterTopic
+	proxy.coreTopic = coreTopic
+	proxy.deviceIdCoreMap = make(map[string]string)
+	proxy.lockDeviceIdCoreMap = sync.RWMutex{}
+	logger.Debugw(ctx, "TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+
+	return &proxy
+}
+
+func unPackResponse(ctx context.Context, rpc string, deviceId string, success bool, response *a.Any) error {
+	if success {
+		return nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(response, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "response", log.Fields{"rpc": rpc, "device-id": deviceId, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return status.Errorf(codes.Canceled, "%s", unpackResult.Reason)
+	}
+}
+
+// UpdateCoreReference adds or updates a core reference (really the topic name) for a given device Id
+func (ap *CoreProxy) UpdateCoreReference(deviceId string, coreReference string) {
+	ap.lockDeviceIdCoreMap.Lock()
+	defer ap.lockDeviceIdCoreMap.Unlock()
+	ap.deviceIdCoreMap[deviceId] = coreReference
+}
+
+// DeleteCoreReference removes a core reference (really the topic name) for a given device Id
+func (ap *CoreProxy) DeleteCoreReference(deviceId string) {
+	ap.lockDeviceIdCoreMap.Lock()
+	defer ap.lockDeviceIdCoreMap.Unlock()
+	delete(ap.deviceIdCoreMap, deviceId)
+}
+
+func (ap *CoreProxy) getCoreTopic(deviceId string) kafka.Topic {
+	ap.lockDeviceIdCoreMap.Lock()
+	defer ap.lockDeviceIdCoreMap.Unlock()
+
+	if t, exist := ap.deviceIdCoreMap[deviceId]; exist {
+		return kafka.Topic{Name: t}
+	}
+
+	return kafka.Topic{Name: ap.coreTopic}
+}
+
+func (ap *CoreProxy) getAdapterTopic(args ...string) kafka.Topic {
+	return kafka.Topic{Name: ap.adapterTopic}
+}
+
+func (ap *CoreProxy) RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error {
+	logger.Debugw(ctx, "registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
+	rpc := "Register"
+	topic := kafka.Topic{Name: ap.coreTopic}
+	replyToTopic := ap.getAdapterTopic()
+	args := make([]*kafka.KVArg, 2)
+
+	if adapter.TotalReplicas == 0 && adapter.CurrentReplica != 0 {
+		logger.Fatal(ctx, "totalReplicas can't be 0, since you're here you have at least one")
+	}
+
+	if adapter.CurrentReplica == 0 && adapter.TotalReplicas != 0 {
+		logger.Fatal(ctx, "currentReplica can't be 0, it has to start from 1")
+	}
+
+	if adapter.CurrentReplica == 0 && adapter.TotalReplicas == 0 {
+		// if the adapter does not set these fields they default to 0;
+		// in that case the adapter is not ready to be scaled and thus defaults
+		// to a single instance
+		adapter.CurrentReplica = 1
+		adapter.TotalReplicas = 1
+	}
+
+	if adapter.CurrentReplica > adapter.TotalReplicas {
+		logger.Fatalf(ctx, "CurrentReplica (%d) can't be greater than TotalReplicas (%d)",
+			adapter.CurrentReplica, adapter.TotalReplicas)
+	}
+
+	args[0] = &kafka.KVArg{
+		Key:   "adapter",
+		Value: adapter,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "deviceTypes",
+		Value: deviceTypes,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, "", args...)
+	logger.Debugw(ctx, "Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(ctx, rpc, "", success, result)
+}
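The replica checks above treat an adapter that sets neither field as a single instance and abort on inconsistent values. The same rules restated as a small standalone function, for illustration only:

    package example

    import "fmt"

    // normalizeReplicas mirrors the defaulting logic in RegisterAdapter:
    // neither field set means a single, unscaled instance.
    func normalizeReplicas(current, total int32) (int32, int32, error) {
    	switch {
    	case current == 0 && total == 0:
    		return 1, 1, nil
    	case current == 0 || total == 0:
    		return 0, 0, fmt.Errorf("currentReplica and totalReplicas must both be set")
    	case current > total:
    		return 0, 0, fmt.Errorf("currentReplica (%d) can't be greater than totalReplicas (%d)", current, total)
    	}
    	return current, total, nil
    }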
+
+func (ap *CoreProxy) DeviceUpdate(ctx context.Context, device *voltha.Device) error {
+	logger.Debugw(ctx, "DeviceUpdate", log.Fields{"device-id": device.Id})
+	rpc := "DeviceUpdate"
+	toTopic := ap.getCoreTopic(device.Id)
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "device",
+		Value: device,
+	}
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, device.Id, args...)
+	logger.Debugw(ctx, "DeviceUpdate-response", log.Fields{"device-id": device.Id, "success": success})
+	return unPackResponse(ctx, rpc, device.Id, success, result)
+}
+
+func (ap *CoreProxy) PortCreated(ctx context.Context, deviceId string, port *voltha.Port) error {
+	logger.Debugw(ctx, "PortCreated", log.Fields{"portNo": port.PortNo})
+	rpc := "PortCreated"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "port",
+		Value: port,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "PortCreated-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, portTypeFilter uint32, operStatus voltha.OperStatus_Types) error {
+	logger.Debugw(ctx, "PortsStateUpdate", log.Fields{"device-id": deviceId})
+	rpc := "PortsStateUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := []*kafka.KVArg{{
+		Key:   "device_id",
+		Value: &voltha.ID{Id: deviceId},
+	}, {
+		Key:   "port_type_filter",
+		Value: &ic.IntType{Val: int64(portTypeFilter)},
+	}, {
+		Key:   "oper_status",
+		Value: &ic.IntType{Val: int64(operStatus)},
+	}}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "PortsStateUpdate-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) DeleteAllPorts(ctx context.Context, deviceId string) error {
+	logger.Debugw(ctx, "DeleteAllPorts", log.Fields{"device-id": deviceId})
+	rpc := "DeleteAllPorts"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 1) // only device_id is sent
+	id := &voltha.ID{Id: deviceId}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "DeleteAllPorts-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) GetDevicePort(ctx context.Context, deviceID string, portNo uint32) (*voltha.Port, error) {
+	logger.Debugw(ctx, "GetDevicePort", log.Fields{"device-id": deviceID})
+	rpc := "GetDevicePort"
+
+	toTopic := ap.getCoreTopic(deviceID)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := []*kafka.KVArg{{
+		Key:   "device_id",
+		Value: &voltha.ID{Id: deviceID},
+	}, {
+		Key:   "port_no",
+		Value: &ic.IntType{Val: int64(portNo)},
+	}}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceID, args...)
+	logger.Debugw(ctx, "GetDevicePort-response", log.Fields{"device-id": deviceID, "success": success})
+
+	if success {
+		port := &voltha.Port{}
+		if err := ptypes.UnmarshalAny(result, port); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return port, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "GetDevicePort-return", log.Fields{"device-id": deviceID, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) ListDevicePorts(ctx context.Context, deviceID string) ([]*voltha.Port, error) {
+	logger.Debugw(ctx, "ListDevicePorts", log.Fields{"device-id": deviceID})
+	rpc := "ListDevicePorts"
+
+	toTopic := ap.getCoreTopic(deviceID)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := []*kafka.KVArg{{
+		Key:   "device_id",
+		Value: &voltha.ID{Id: deviceID},
+	}}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceID, args...)
+	logger.Debugw(ctx, "ListDevicePorts-response", log.Fields{"device-id": deviceID, "success": success})
+
+	if success {
+		ports := &voltha.Ports{}
+		if err := ptypes.UnmarshalAny(result, ports); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return ports.Items, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "ListDevicePorts-return", log.Fields{"device-id": deviceID, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
+	connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error {
+	logger.Debugw(ctx, "DeviceStateUpdate", log.Fields{"device-id": deviceId})
+	rpc := "DeviceStateUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 3)
+	id := &voltha.ID{Id: deviceId}
+	oStatus := &ic.IntType{Val: int64(operStatus)}
+	cStatus := &ic.IntType{Val: int64(connStatus)}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "oper_status",
+		Value: oStatus,
+	}
+	args[2] = &kafka.KVArg{
+		Key:   "connect_status",
+		Value: cStatus,
+	}
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "DeviceStateUpdate-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) ChildDeviceDetected(ctx context.Context, parentDeviceId string, parentPortNo int,
+	childDeviceType string, channelId int, vendorId string, serialNumber string, onuId int64) (*voltha.Device, error) {
+	logger.Debugw(ctx, "ChildDeviceDetected", log.Fields{"parent-device-id": parentDeviceId, "channelId": channelId})
+	rpc := "ChildDeviceDetected"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 7)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+	ppn := &ic.IntType{Val: int64(parentPortNo)}
+	args[1] = &kafka.KVArg{
+		Key:   "parent_port_no",
+		Value: ppn,
+	}
+	cdt := &ic.StrType{Val: childDeviceType}
+	args[2] = &kafka.KVArg{
+		Key:   "child_device_type",
+		Value: cdt,
+	}
+	channel := &ic.IntType{Val: int64(channelId)}
+	args[3] = &kafka.KVArg{
+		Key:   "channel_id",
+		Value: channel,
+	}
+	vId := &ic.StrType{Val: vendorId}
+	args[4] = &kafka.KVArg{
+		Key:   "vendor_id",
+		Value: vId,
+	}
+	sNo := &ic.StrType{Val: serialNumber}
+	args[5] = &kafka.KVArg{
+		Key:   "serial_number",
+		Value: sNo,
+	}
+	oId := &ic.IntType{Val: int64(onuId)}
+	args[6] = &kafka.KVArg{
+		Key:   "onu_id",
+		Value: oId,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "ChildDeviceDetected-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "ChildDeviceDetected-return", log.Fields{"device-id": parentDeviceId, "success": success, "error": err})
+
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+
+}
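A hedged sketch of the typical caller: an OLT adapter reporting a newly discovered ONU. The hook name, child device type, vendor id and the reuse of the PON port as channel id are illustrative values, not library conventions:

    package example

    import (
    	"context"

    	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/common"
    	"github.com/opencord/voltha-protos/v4/go/voltha"
    )

    // onOnuDiscovered is a hypothetical OLT-adapter hook.
    func onOnuDiscovered(ctx context.Context, cp *common.CoreProxy, oltDeviceID string,
    	ponPort int, serial string, onuID int64) (*voltha.Device, error) {
    	return cp.ChildDeviceDetected(ctx, oltDeviceID, ponPort,
    		"brcm_openomci_onu", ponPort, "BRCM", serial, onuID)
    }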
+
+func (ap *CoreProxy) ChildDevicesLost(ctx context.Context, parentDeviceId string) error {
+	logger.Debugw(ctx, "ChildDevicesLost", log.Fields{"parent-device-id": parentDeviceId})
+	rpc := "ChildDevicesLost"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "ChildDevicesLost-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
+}
+
+func (ap *CoreProxy) ChildDevicesDetected(ctx context.Context, parentDeviceId string) error {
+	logger.Debugw(ctx, "ChildDevicesDetected", log.Fields{"parent-device-id": parentDeviceId})
+	rpc := "ChildDevicesDetected"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "parent_device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "ChildDevicesDetected-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
+}
+
+func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
+	logger.Debugw(ctx, "GetDevice", log.Fields{"device-id": deviceId})
+	rpc := "GetDevice"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "GetDevice-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "GetDevice-return", log.Fields{"parent-device-id": parentDeviceId, "success": success, "error": err})
+		// TODO:  Need to get the real error code
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) GetChildDevice(ctx context.Context, parentDeviceId string, kwargs map[string]interface{}) (*voltha.Device, error) {
+	logger.Debugw(ctx, "GetChildDevice", log.Fields{"parent-device-id": parentDeviceId, "kwargs": kwargs})
+	rpc := "GetChildDevice"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 4)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	var cnt uint8 = 0
+	for k, v := range kwargs {
+		cnt += 1
+		if k == "serial_number" {
+			val := &ic.StrType{Val: v.(string)}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		} else if k == "onu_id" {
+			val := &ic.IntType{Val: int64(v.(uint32))}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		} else if k == "parent_port_no" {
+			val := &ic.IntType{Val: int64(v.(uint32))}
+			args[cnt] = &kafka.KVArg{
+				Key:   k,
+				Value: val,
+			}
+		}
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "GetChildDevice-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevice := &voltha.Device{}
+		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return volthaDevice, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "GetChildDevice-return", log.Fields{"parent-device-id": parentDeviceId, "success": success, "error": err})
+
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
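GetChildDevice matches on whichever of serial_number, onu_id and parent_port_no are supplied, and the type assertions above expect exactly a string for the serial and uint32 for the numeric keys, so callers must build the map with those types. A hedged lookup helper (findOnu and its argument values are illustrative):

    package example

    import (
    	"context"

    	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/common"
    	"github.com/opencord/voltha-protos/v4/go/voltha"
    )

    // findOnu is a hypothetical helper; note the argument types, which must
    // match the assertions performed in GetChildDevice.
    func findOnu(ctx context.Context, cp *common.CoreProxy, parentID, serial string,
    	onuID uint32) (*voltha.Device, error) {
    	kwargs := map[string]interface{}{
    		"serial_number": serial,
    		"onu_id":        onuID,
    	}
    	return cp.GetChildDevice(ctx, parentID, kwargs)
    }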
+
+func (ap *CoreProxy) GetChildDevices(ctx context.Context, parentDeviceId string) (*voltha.Devices, error) {
+	logger.Debugw(ctx, "GetChildDevices", log.Fields{"parent-device-id": parentDeviceId})
+	rpc := "GetChildDevices"
+
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	id := &voltha.ID{Id: parentDeviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "GetChildDevices-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+
+	if success {
+		volthaDevices := &voltha.Devices{}
+		if err := ptypes.UnmarshalAny(result, volthaDevices); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			return nil, status.Error(codes.InvalidArgument, err.Error())
+		}
+		return volthaDevices, nil
+	} else {
+		unpackResult := &ic.Error{}
+		var err error
+		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		}
+		logger.Debugw(ctx, "GetChildDevices-return", log.Fields{"parent-device-id": parentDeviceId, "success": success, "error": err})
+
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+	}
+}
+
+func (ap *CoreProxy) SendPacketIn(ctx context.Context, deviceId string, port uint32, pktPayload []byte) error {
+	logger.Debugw(ctx, "SendPacketIn", log.Fields{"device-id": deviceId, "port": port, "pktPayload": pktPayload})
+	rpc := "PacketIn"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 3)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	portNo := &ic.IntType{Val: int64(port)}
+	args[1] = &kafka.KVArg{
+		Key:   "port",
+		Value: portNo,
+	}
+	pkt := &ic.Packet{Payload: pktPayload}
+	args[2] = &kafka.KVArg{
+		Key:   "packet",
+		Value: pkt,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "SendPacketIn-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) DeviceReasonUpdate(ctx context.Context, deviceId string, deviceReason string) error {
+	logger.Debugw(ctx, "DeviceReasonUpdate", log.Fields{"device-id": deviceId, "deviceReason": deviceReason})
+	rpc := "DeviceReasonUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 2)
+	id := &voltha.ID{Id: deviceId}
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: id,
+	}
+	reason := &ic.StrType{Val: deviceReason}
+	args[1] = &kafka.KVArg{
+		Key:   "device_reason",
+		Value: reason,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "DeviceReason-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
+
+func (ap *CoreProxy) DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error {
+	logger.Debugw(ctx, "DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
+	rpc := "DevicePMConfigUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(pmConfigs.Id)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := make([]*kafka.KVArg, 1)
+	args[0] = &kafka.KVArg{
+		Key:   "device_pm_config",
+		Value: pmConfigs,
+	}
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
+	logger.Debugw(ctx, "DevicePMConfigUpdate-response", log.Fields{"pmconfig-device-id": pmConfigs.Id, "success": success})
+	return unPackResponse(ctx, rpc, pmConfigs.Id, success, result)
+}
+
+func (ap *CoreProxy) ReconcileChildDevices(ctx context.Context, parentDeviceId string) error {
+	logger.Debugw(ctx, "ReconcileChildDevices", log.Fields{"parent-device-id": parentDeviceId})
+	rpc := "ReconcileChildDevices"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(parentDeviceId)
+	replyToTopic := ap.getAdapterTopic()
+
+	args := []*kafka.KVArg{
+		{Key: "parent_device_id", Value: &voltha.ID{Id: parentDeviceId}},
+	}
+
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
+	logger.Debugw(ctx, "ReconcileChildDevices-response", log.Fields{"parent-device-id": parentDeviceId, "success": success})
+	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
+}
+
+func (ap *CoreProxy) PortStateUpdate(ctx context.Context, deviceId string, pType voltha.Port_PortType, portNum uint32,
+	operStatus voltha.OperStatus_Types) error {
+	logger.Debugw(ctx, "PortStateUpdate", log.Fields{"device-id": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
+	rpc := "PortStateUpdate"
+	// Use a device specific topic to send the request.  The adapter handling the device creates a device
+	// specific topic
+	toTopic := ap.getCoreTopic(deviceId)
+	args := make([]*kafka.KVArg, 4)
+	deviceID := &voltha.ID{Id: deviceId}
+	portNo := &ic.IntType{Val: int64(portNum)}
+	portType := &ic.IntType{Val: int64(pType)}
+	oStatus := &ic.IntType{Val: int64(operStatus)}
+
+	args[0] = &kafka.KVArg{
+		Key:   "device_id",
+		Value: deviceID,
+	}
+	args[1] = &kafka.KVArg{
+		Key:   "oper_status",
+		Value: oStatus,
+	}
+	args[2] = &kafka.KVArg{
+		Key:   "port_type",
+		Value: portType,
+	}
+	args[3] = &kafka.KVArg{
+		Key:   "port_no",
+		Value: portNo,
+	}
+
+	// Use a device specific topic as we are the only adaptercore handling requests for this device
+	replyToTopic := ap.getAdapterTopic()
+	success, result := ap.kafkaICProxy.InvokeRPC(log.WithSpanFromContext(context.Background(), ctx), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
+	logger.Debugw(ctx, "PortStateUpdate-response", log.Fields{"device-id": deviceId, "success": success})
+	return unPackResponse(ctx, rpc, deviceId, success, result)
+}
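
Note for adapter authors: every CoreProxy method above now takes a context.Context as its first argument, and InvokeRPC forwards any tracing span attached to it. A minimal call-site sketch; the device id, port type and status values below are illustrative, not part of this change:

package example

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/common"
	"github.com/opencord/voltha-protos/v4/go/voltha"
)

func raiseNniUp(cp *common.CoreProxy, deviceID string, portNo uint32) error {
	// Bound the request so a stuck Core cannot block the adapter forever;
	// the same ctx carries the tracing span that InvokeRPC propagates.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	return cp.PortStateUpdate(ctx, deviceID, voltha.Port_ETHERNET_NNI, portNo, voltha.OperStatus_ACTIVE)
}
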
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/events_proxy.go
similarity index 71%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/events_proxy.go
index da9c9eb..b16c1ae 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/events_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/events_proxy.go
@@ -17,6 +17,7 @@
 package common
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"strconv"
@@ -24,10 +25,10 @@
 	"time"
 
 	"github.com/golang/protobuf/ptypes"
-	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
-	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	"github.com/opencord/voltha-protos/v3/go/voltha"
+	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
 )
 
 type EventProxy struct {
@@ -96,9 +97,9 @@
 }
 
 /* Send out device events*/
-func (ep *EventProxy) SendDeviceEvent(deviceEvent *voltha.DeviceEvent, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
+func (ep *EventProxy) SendDeviceEvent(ctx context.Context, deviceEvent *voltha.DeviceEvent, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
 	if deviceEvent == nil {
-		logger.Error("Recieved empty device event")
+		logger.Error(ctx, "Recieved empty device event")
 		return errors.New("Device event nil")
 	}
 	var event voltha.Event
@@ -109,11 +110,11 @@
 		return err
 	}
 	event.EventType = &de
-	if err := ep.sendEvent(&event); err != nil {
-		logger.Errorw("Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
+	if err := ep.sendEvent(ctx, &event); err != nil {
+		logger.Errorw(ctx, "Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
 		return err
 	}
-	logger.Infow("Successfully sent device event KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+	logger.Infow(ctx, "Successfully sent device event KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
 		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
 		"ReportedTs": event.Header.ReportedTs, "ResourceId": deviceEvent.ResourceId, "Context": deviceEvent.Context,
 		"DeviceEventName": deviceEvent.DeviceEventName})
@@ -123,9 +124,9 @@
 }
 
 // SendKpiEvent is to send kpi events to voltha.event topic
-func (ep *EventProxy) SendKpiEvent(id string, kpiEvent *voltha.KpiEvent2, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
+func (ep *EventProxy) SendKpiEvent(ctx context.Context, id string, kpiEvent *voltha.KpiEvent2, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
 	if kpiEvent == nil {
-		logger.Error("Recieved empty kpi event")
+		logger.Error(ctx, "Recieved empty kpi event")
 		return errors.New("KPI event nil")
 	}
 	var event voltha.Event
@@ -136,11 +137,11 @@
 		return err
 	}
 	event.EventType = &de
-	if err := ep.sendEvent(&event); err != nil {
-		logger.Errorw("Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
+	if err := ep.sendEvent(ctx, &event); err != nil {
+		logger.Errorw(ctx, "Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
 		return err
 	}
-	logger.Infow("Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+	logger.Infow(ctx, "Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
 		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
 		"ReportedTs": event.Header.ReportedTs, "KpiEventName": "STATS_EVENT"})
 
@@ -150,11 +151,11 @@
 
 /* TODO: Send out KPI events*/
 
-func (ep *EventProxy) sendEvent(event *voltha.Event) error {
-	if err := ep.kafkaClient.Send(event, &ep.eventTopic); err != nil {
+func (ep *EventProxy) sendEvent(ctx context.Context, event *voltha.Event) error {
+	if err := ep.kafkaClient.Send(ctx, event, &ep.eventTopic); err != nil {
 		return err
 	}
-	logger.Debugw("Sent event to kafka", log.Fields{"event": event})
+	logger.Debugw(ctx, "Sent event to kafka", log.Fields{"event": event})
 
 	return nil
 }
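
The event proxy follows the same pattern: a ctx is threaded from SendDeviceEvent/SendKpiEvent down to the kafka client's Send. A sketch of raising a device event, assuming the surrounding imports; the event name and category constants are placeholders:

func raiseOnuLostEvent(ctx context.Context, ep *common.EventProxy, deviceID string) error {
	de := &voltha.DeviceEvent{
		ResourceId:      deviceID,
		DeviceEventName: "ONU_LOST_RaiseEvent", // hypothetical event name
	}
	// raisedTs stays a plain unix timestamp; only the leading ctx is new.
	return ep.SendDeviceEvent(ctx, de, voltha.EventCategory_EQUIPMENT,
		voltha.EventSubCategory_ONU, time.Now().Unix())
}
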
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/performance_metrics.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/performance_metrics.go
similarity index 97%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/performance_metrics.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/performance_metrics.go
index 7697c05..6705c72 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/performance_metrics.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/performance_metrics.go
@@ -17,7 +17,7 @@
 package common
 
 import (
-	"github.com/opencord/voltha-protos/v3/go/voltha"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
 )
 
 type PmMetrics struct {
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/request_handler.go
similarity index 60%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/request_handler.go
index 62d8cdd..50b9195 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/request_handler.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/request_handler.go
@@ -16,17 +16,18 @@
 package common
 
 import (
+	"context"
 	"errors"
 
 	"github.com/golang/protobuf/ptypes"
 	"github.com/golang/protobuf/ptypes/empty"
-	"github.com/opencord/voltha-lib-go/v3/pkg/adapters"
-	"github.com/opencord/voltha-lib-go/v3/pkg/adapters/adapterif"
-	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
-	"github.com/opencord/voltha-protos/v3/go/openflow_13"
-	"github.com/opencord/voltha-protos/v3/go/voltha"
+	"github.com/opencord/voltha-lib-go/v4/pkg/adapters"
+	"github.com/opencord/voltha-lib-go/v4/pkg/adapters/adapterif"
+	"github.com/opencord/voltha-lib-go/v4/pkg/kafka"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opencord/voltha-protos/v4/go/openflow_13"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -58,9 +59,9 @@
 	return nil, nil
 }
 
-func (rhp *RequestHandlerProxy) Adopt_device(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Adopt_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -71,38 +72,38 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
 
-	logger.Debugw("Adopt_device", log.Fields{"deviceId": device.Id})
+	logger.Debugw(ctx, "Adopt_device", log.Fields{"deviceId": device.Id})
 
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 
 	//Invoke the adopt device on the adapter
-	if err := rhp.adapter.Adopt_device(device); err != nil {
+	if err := rhp.adapter.Adopt_device(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Reconcile_device(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Reconcile_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -114,17 +115,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -133,7 +134,7 @@
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 
 	//Invoke the reconcile device API on the adapter
-	if err := rhp.adapter.Reconcile_device(device); err != nil {
+	if err := rhp.adapter.Reconcile_device(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
@@ -143,9 +144,9 @@
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Disable_device(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Disable_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -157,17 +158,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -175,15 +176,15 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 	//Invoke the Disable_device API on the adapter
-	if err := rhp.adapter.Disable_device(device); err != nil {
+	if err := rhp.adapter.Disable_device(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Reenable_device(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Reenable_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -195,17 +196,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -213,15 +214,15 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 	//Invoke the Reenable_device API on the adapter
-	if err := rhp.adapter.Reenable_device(device); err != nil {
+	if err := rhp.adapter.Reenable_device(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Reboot_device(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Reboot_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -233,17 +234,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -251,7 +252,7 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 	//Invoke the Reboot_device API on the adapter
-	if err := rhp.adapter.Reboot_device(device); err != nil {
+	if err := rhp.adapter.Reboot_device(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
@@ -262,9 +263,9 @@
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Delete_device(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Delete_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -276,17 +277,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -294,7 +295,7 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 	//Invoke the delete_device API on the adapter
-	if err := rhp.adapter.Delete_device(device); err != nil {
+	if err := rhp.adapter.Delete_device(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
@@ -304,10 +305,10 @@
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Update_flows_bulk(args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debug("Update_flows_bulk")
+func (rhp *RequestHandlerProxy) Update_flows_bulk(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug(ctx, "Update_flows_bulk")
 	if len(args) < 5 {
-		logger.Warn("Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -320,43 +321,43 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flows":
 			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
-				logger.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-flows", log.Fields{"error": err})
 				return nil, err
 			}
 		case "groups":
 			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
-				logger.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-groups", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flow_metadata":
 			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
-				logger.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-metadata", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw("Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
+	logger.Debugw(ctx, "Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
 	//Invoke the bulk flow update API of the adapter
-	if err := rhp.adapter.Update_flows_bulk(device, flows, groups, flowMetadata); err != nil {
+	if err := rhp.adapter.Update_flows_bulk(ctx, device, flows, groups, flowMetadata); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Update_flows_incrementally(args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debug("Update_flows_incrementally")
+func (rhp *RequestHandlerProxy) Update_flows_incrementally(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug(ctx, "Update_flows_incrementally")
 	if len(args) < 5 {
-		logger.Warn("Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -369,43 +370,43 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flow_changes":
 			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
-				logger.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-flows", log.Fields{"error": err})
 				return nil, err
 			}
 		case "group_changes":
 			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
-				logger.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-groups", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flow_metadata":
 			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
-				logger.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-metadata", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw("Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
+	logger.Debugw(ctx, "Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
 	//Invoke the incremental flow update API of the adapter
-	if err := rhp.adapter.Update_flows_incrementally(device, flows, groups, flowMetadata); err != nil {
+	if err := rhp.adapter.Update_flows_incrementally(ctx, device, flows, groups, flowMetadata); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Update_pm_config(args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debug("Update_pm_config")
+func (rhp *RequestHandlerProxy) Update_pm_config(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug(ctx, "Update_pm_config")
 	if len(args) < 2 {
-		logger.Warn("Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -416,33 +417,33 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "pm_configs":
 			if err := ptypes.UnmarshalAny(arg.Value, pmConfigs); err != nil {
-				logger.Warnw("cannot-unmarshal-pm-configs", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-pm-configs", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw("Update_pm_config", log.Fields{"deviceId": device.Id, "pmConfigs": pmConfigs})
+	logger.Debugw(ctx, "Update_pm_config", log.Fields{"device-id": device.Id, "pmConfigs": pmConfigs})
 	//Invoke the pm config update API of the adapter
-	if err := rhp.adapter.Update_pm_config(device, pmConfigs); err != nil {
+	if err := rhp.adapter.Update_pm_config(ctx, device, pmConfigs); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Receive_packet_out(args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debugw("Receive_packet_out", log.Fields{"args": args})
+func (rhp *RequestHandlerProxy) Receive_packet_out(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"args": args})
 	if len(args) < 3 {
-		logger.Warn("Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -454,29 +455,29 @@
 		switch arg.Key {
 		case "deviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
-				logger.Warnw("cannot-unmarshal-deviceId", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device-id", log.Fields{"error": err})
 				return nil, err
 			}
 		case "outPort":
 			if err := ptypes.UnmarshalAny(arg.Value, egressPort); err != nil {
-				logger.Warnw("cannot-unmarshal-egressPort", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-egressPort", log.Fields{"error": err})
 				return nil, err
 			}
 		case "packet":
 			if err := ptypes.UnmarshalAny(arg.Value, packet); err != nil {
-				logger.Warnw("cannot-unmarshal-packet", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-packet", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceId.Val, "outPort": egressPort, "packet": packet})
+	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"device-id": deviceId.Val, "outPort": egressPort, "packet": packet})
 	//Invoke the adopt device on the adapter
-	if err := rhp.adapter.Receive_packet_out(deviceId.Val, int(egressPort.Val), packet); err != nil {
+	if err := rhp.adapter.Receive_packet_out(ctx, deviceId.Val, int(egressPort.Val), packet); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
@@ -490,9 +491,9 @@
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Get_ofp_device_info(args []*ic.Argument) (*ic.SwitchCapability, error) {
+func (rhp *RequestHandlerProxy) Get_ofp_device_info(ctx context.Context, args []*ic.Argument) (*ic.SwitchCapability, error) {
 	if len(args) < 2 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -502,31 +503,31 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
 
-	logger.Debugw("Get_ofp_device_info", log.Fields{"deviceId": device.Id})
+	logger.Debugw(ctx, "Get_ofp_device_info", log.Fields{"device-id": device.Id})
 
 	var cap *ic.SwitchCapability
 	var err error
-	if cap, err = rhp.adapter.Get_ofp_device_info(device); err != nil {
+	if cap, err = rhp.adapter.Get_ofp_device_info(ctx, device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
-	logger.Debugw("Get_ofp_device_info", log.Fields{"cap": cap})
+	logger.Debugw(ctx, "Get_ofp_device_info", log.Fields{"cap": cap})
 	return cap, nil
 }
 
-func (rhp *RequestHandlerProxy) Process_inter_adapter_message(args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Process_inter_adapter_message(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 2 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -536,21 +537,21 @@
 		switch arg.Key {
 		case "msg":
 			if err := ptypes.UnmarshalAny(arg.Value, iaMsg); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
 
-	logger.Debugw("Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
+	logger.Debugw(ctx, "Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
 
 	//Invoke the inter adapter API on the handler
-	if err := rhp.adapter.Process_inter_adapter_message(iaMsg); err != nil {
+	if err := rhp.adapter.Process_inter_adapter_message(ctx, iaMsg); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 
@@ -577,30 +578,30 @@
 	return &voltha.ImageDownload{}, nil
 }
 
-func (rhp *RequestHandlerProxy) Enable_port(args []*ic.Argument) error {
-	logger.Debugw("enable_port", log.Fields{"args": args})
-	deviceId, port, err := rhp.getEnableDisableParams(args)
+func (rhp *RequestHandlerProxy) Enable_port(ctx context.Context, args []*ic.Argument) error {
+	logger.Debugw(ctx, "enable_port", log.Fields{"args": args})
+	deviceId, port, err := rhp.getEnableDisableParams(ctx, args)
 	if err != nil {
-		logger.Warnw("enable_port", log.Fields{"args": args, "deviceId": deviceId, "port": port})
+		logger.Warnw(ctx, "enable_port", log.Fields{"args": args, "device-id": deviceId, "port": port})
 		return err
 	}
-	return rhp.adapter.Enable_port(deviceId, port)
+	return rhp.adapter.Enable_port(ctx, deviceId, port)
 }
 
-func (rhp *RequestHandlerProxy) Disable_port(args []*ic.Argument) error {
-	logger.Debugw("disable_port", log.Fields{"args": args})
-	deviceId, port, err := rhp.getEnableDisableParams(args)
+func (rhp *RequestHandlerProxy) Disable_port(ctx context.Context, args []*ic.Argument) error {
+	logger.Debugw(ctx, "disable_port", log.Fields{"args": args})
+	deviceId, port, err := rhp.getEnableDisableParams(ctx, args)
 	if err != nil {
-		logger.Warnw("disable_port", log.Fields{"args": args, "deviceId": deviceId, "port": port})
+		logger.Warnw(ctx, "disable_port", log.Fields{"args": args, "device-id": deviceId, "port": port})
 		return err
 	}
-	return rhp.adapter.Disable_port(deviceId, port)
+	return rhp.adapter.Disable_port(ctx, deviceId, port)
 }
 
-func (rhp *RequestHandlerProxy) getEnableDisableParams(args []*ic.Argument) (string, *voltha.Port, error) {
-	logger.Debugw("getEnableDisableParams", log.Fields{"args": args})
+func (rhp *RequestHandlerProxy) getEnableDisableParams(ctx context.Context, args []*ic.Argument) (string, *voltha.Port, error) {
+	logger.Debugw(ctx, "getEnableDisableParams", log.Fields{"args": args})
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		return "", nil, errors.New("invalid-number-of-args")
 	}
 	deviceId := &ic.StrType{}
@@ -609,12 +610,12 @@
 		switch arg.Key {
 		case "deviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return "", nil, err
 			}
 		case "port":
 			if err := ptypes.UnmarshalAny(arg.Value, port); err != nil {
-				logger.Warnw("cannot-unmarshal-port", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-port", log.Fields{"error": err})
 				return "", nil, err
 			}
 		}
@@ -622,9 +623,9 @@
 	return deviceId.Val, port, nil
 }
 
-func (rhp *RequestHandlerProxy) Child_device_lost(args []*ic.Argument) error {
+func (rhp *RequestHandlerProxy) Child_device_lost(ctx context.Context, args []*ic.Argument) error {
 	if len(args) < 4 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		return errors.New("invalid-number-of-args")
 	}
 
@@ -636,22 +637,22 @@
 		switch arg.Key {
 		case "pDeviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, pDeviceId); err != nil {
-				logger.Warnw("cannot-unmarshal-parent-deviceId", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-parent-deviceId", log.Fields{"error": err})
 				return err
 			}
 		case "pPortNo":
 			if err := ptypes.UnmarshalAny(arg.Value, pPortNo); err != nil {
-				logger.Warnw("cannot-unmarshal-port", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-port", log.Fields{"error": err})
 				return err
 			}
 		case "onuID":
 			if err := ptypes.UnmarshalAny(arg.Value, onuID); err != nil {
-				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return err
 			}
 		}
@@ -659,15 +660,15 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(pDeviceId.Val, fromTopic.Val)
 	//Invoke the Child_device_lost API on the adapter
-	if err := rhp.adapter.Child_device_lost(pDeviceId.Val, uint32(pPortNo.Val), uint32(onuID.Val)); err != nil {
+	if err := rhp.adapter.Child_device_lost(ctx, pDeviceId.Val, uint32(pPortNo.Val), uint32(onuID.Val)); err != nil {
 		return status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return nil
 }
 
-func (rhp *RequestHandlerProxy) Start_omci_test(args []*ic.Argument) (*ic.TestResponse, error) {
+func (rhp *RequestHandlerProxy) Start_omci_test(ctx context.Context, args []*ic.Argument) (*ic.TestResponse, error) {
 	if len(args) < 2 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -681,26 +682,26 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "omcitestrequest":
 			if err := ptypes.UnmarshalAny(arg.Value, request); err != nil {
-				logger.Warnw("cannot-unmarshal-omcitestrequest", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-omcitestrequest", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw("Start_omci_test", log.Fields{"device-id": device.Id, "req": request})
-	result, err := rhp.adapter.Start_omci_test(device, request)
+	logger.Debugw(ctx, "Start_omci_test", log.Fields{"device-id": device.Id, "req": request})
+	result, err := rhp.adapter.Start_omci_test(ctx, device, request)
 	if err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return result, nil
 }
-func (rhp *RequestHandlerProxy) Get_ext_value(args []*ic.Argument) (*voltha.ReturnValues, error) {
+func (rhp *RequestHandlerProxy) Get_ext_value(ctx context.Context, args []*ic.Argument) (*voltha.ReturnValues, error) {
 	if len(args) < 3 {
-		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
 		return nil, errors.New("invalid-number-of-args")
 	}
 
@@ -711,24 +712,24 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "pDeviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, pDeviceId); err != nil {
-				logger.Warnw("cannot-unmarshal-parent-deviceId", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-parent-device-id", log.Fields{"error": err})
 				return nil, err
 			}
 		case "valuetype":
 			if err := ptypes.UnmarshalAny(arg.Value, valuetype); err != nil {
-				logger.Warnw("cannot-unmarshal-valuetype", log.Fields{"error": err})
+				logger.Warnw(ctx, "cannot-unmarshal-valuetype", log.Fields{"error": err})
 				return nil, err
 			}
 		default:
-			logger.Warnw("key-not-found", log.Fields{"arg.Key": arg.Key})
+			logger.Warnw(ctx, "key-not-found", log.Fields{"arg.Key": arg.Key})
 		}
 	}
 
 	//Invoke the Get_value API on the adapter
-	return rhp.adapter.Get_ext_value(pDeviceId.Val, device, voltha.ValueType_Type(valuetype.Val))
+	return rhp.adapter.Get_ext_value(ctx, pDeviceId.Val, device, voltha.ValueType_Type(valuetype.Val))
 }
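
Every handler entry point above gained a leading ctx, supplied by the kafka proxy at dispatch time; the per-argument unmarshal loops are otherwise unchanged. A small helper sketch in the same style, assuming the "device" arg key used throughout this file:

func decodeDevice(ctx context.Context, args []*ic.Argument) (*voltha.Device, error) {
	device := &voltha.Device{}
	for _, arg := range args {
		if arg.Key == "device" {
			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
				return nil, err
			}
			return device, nil
		}
	}
	return nil, errors.New("device-argument-missing")
}
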
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/utils.go
similarity index 88%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/utils.go
index 94e8bd6..65b432c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/adapters/common/utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/common/utils.go
@@ -16,9 +16,10 @@
 package common
 
 import (
+	"context"
 	"fmt"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 	"google.golang.org/grpc/codes"
 	"math/rand"
 	"time"
@@ -75,7 +76,7 @@
 	return string(b)
 }
 
-func ICProxyErrorCodeToGrpcErrorCode(icErr ic.ErrorCodeCodes) codes.Code {
+func ICProxyErrorCodeToGrpcErrorCode(ctx context.Context, icErr ic.ErrorCodeCodes) codes.Code {
 	switch icErr {
 	case ic.ErrorCode_INVALID_PARAMETERS:
 		return codes.InvalidArgument
@@ -84,7 +85,7 @@
 	case ic.ErrorCode_DEADLINE_EXCEEDED:
 		return codes.DeadlineExceeded
 	default:
-		logger.Warnw("cannnot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
+		logger.Warnw(ctx, "cannnot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
 		return codes.Internal
 	}
 }
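
The mapper gains a ctx only so its fallback warning carries trace context; turning an inter-container error into a gRPC error stays a one-liner. A sketch, assuming an ic.Error already unmarshalled from an RPC reply and the grpc status package in scope:

func toGrpcError(ctx context.Context, icErr *ic.Error) error {
	// Codes the switch above does not recognize map to codes.Internal.
	return status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, icErr.Code), icErr.Reason)
}
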
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/iAdapter.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/iAdapter.go
new file mode 100644
index 0000000..b106d52
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/adapters/iAdapter.go
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package adapters
+
+import (
+	"context"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opencord/voltha-protos/v4/go/openflow_13"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
+)
+
+//IAdapter represents the set of APIs a voltha adapter has to support.
+type IAdapter interface {
+	Adapter_descriptor(ctx context.Context) error
+	Device_types(ctx context.Context) (*voltha.DeviceTypes, error)
+	Health(ctx context.Context) (*voltha.HealthStatus, error)
+	Adopt_device(ctx context.Context, device *voltha.Device) error
+	Reconcile_device(ctx context.Context, device *voltha.Device) error
+	Abandon_device(ctx context.Context, device *voltha.Device) error
+	Disable_device(ctx context.Context, device *voltha.Device) error
+	Reenable_device(ctx context.Context, device *voltha.Device) error
+	Reboot_device(ctx context.Context, device *voltha.Device) error
+	Self_test_device(ctx context.Context, device *voltha.Device) error
+	Delete_device(ctx context.Context, device *voltha.Device) error
+	Get_device_details(ctx context.Context, device *voltha.Device) error
+	Update_flows_bulk(ctx context.Context, device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error
+	Update_flows_incrementally(ctx context.Context, device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error
+	Update_pm_config(ctx context.Context, device *voltha.Device, pm_configs *voltha.PmConfigs) error
+	Receive_packet_out(ctx context.Context, deviceId string, egress_port_no int, msg *openflow_13.OfpPacketOut) error
+	Suppress_event(ctx context.Context, filter *voltha.EventFilter) error
+	Unsuppress_event(ctx context.Context, filter *voltha.EventFilter) error
+	Get_ofp_device_info(ctx context.Context, device *voltha.Device) (*ic.SwitchCapability, error)
+	Process_inter_adapter_message(ctx context.Context, msg *ic.InterAdapterMessage) error
+	Download_image(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Get_image_download_status(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Cancel_image_download(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Activate_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Revert_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Enable_port(ctx context.Context, deviceId string, port *voltha.Port) error
+	Disable_port(ctx context.Context, deviceId string, port *voltha.Port) error
+	Child_device_lost(ctx context.Context, parentDeviceId string, parentPortNo uint32, onuID uint32) error
+	Start_omci_test(ctx context.Context, device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error)
+	Get_ext_value(ctx context.Context, deviceId string, device *voltha.Device, valueflag voltha.ValueType_Type) (*voltha.ReturnValues, error)
+}
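
Every method of the interface now leads with a ctx, so adapters can observe cancellation and deadlines end to end. A minimal, hypothetical stub for one method:

type stubAdapter struct{}

func (a *stubAdapter) Adopt_device(ctx context.Context, device *voltha.Device) error {
	// A real adapter would start device activation here; this stub only
	// shows that cancellation is now visible at the adapter boundary.
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		return nil
	}
}
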
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/common.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/common.go
similarity index 83%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/common.go
index 37e05fd..294a4bd 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/common.go
@@ -16,15 +16,15 @@
 package config
 
 import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "config"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
 	if err != nil {
 		panic(err)
 	}
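
RegisterPackage returns the new context-aware CLogger and registers the calling package by name, which is why the explicit "pkg" field is dropped. Any consumer package repeats the same boilerplate; a sketch for a hypothetical package:

package mypkg

import "github.com/opencord/voltha-lib-go/v4/pkg/log"

var logger log.CLogger

func init() {
	var err error
	// No log.Fields{"pkg": ...} entry any more; the package name is
	// captured automatically at registration time.
	if logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{}); err != nil {
		panic(err)
	}
}
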
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/configmanager.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go
similarity index 85%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/configmanager.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go
index 24988be..4b1c841 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/configmanager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go
@@ -22,9 +22,9 @@
 	"strings"
 	"time"
 
-	"github.com/opencord/voltha-lib-go/v3/pkg/db"
-	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/db"
+	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 )
 
 const (
@@ -41,10 +41,11 @@
 	ConfigTypeLogLevel ConfigType = iota
 	ConfigTypeMetadata
 	ConfigTypeKafka
+	ConfigTypeLogFeatures
 )
 
 func (c ConfigType) String() string {
-	return [...]string{"loglevel", "metadata", "kafka"}[c]
+	return [...]string{"loglevel", "metadata", "kafka", "logfeatures"}[c]
 }
 
 // ChangeEvent represents the event received from watch
@@ -96,14 +97,14 @@
 	kvStoreEventChan chan *kvstore.Event
 }
 
-func NewConfigManager(kvClient kvstore.Client, kvStoreType, kvStoreAddress string, kvStoreTimeout time.Duration) *ConfigManager {
+func NewConfigManager(ctx context.Context, kvClient kvstore.Client, kvStoreType, kvStoreAddress string, kvStoreTimeout time.Duration) *ConfigManager {
 	var kvStorePrefix string
 	if prefix, present := os.LookupEnv("KV_STORE_DATAPATH_PREFIX"); present {
 		kvStorePrefix = prefix
-		logger.Infow("KV_STORE_DATAPATH_PREFIX env variable is set, ", log.Fields{"kvStoreDataPathPrefix": kvStorePrefix})
+		logger.Infow(ctx, "KV_STORE_DATAPATH_PREFIX env variable is set, ", log.Fields{"kvStoreDataPathPrefix": kvStorePrefix})
 	} else {
 		kvStorePrefix = defaultkvStoreDataPathPrefix
-		logger.Infow("KV_STORE_DATAPATH_PREFIX env variable is not set, using default", log.Fields{"kvStoreDataPathPrefix": defaultkvStoreDataPathPrefix})
+		logger.Infow(ctx, "KV_STORE_DATAPATH_PREFIX env variable is not set, using default", log.Fields{"kvStoreDataPathPrefix": defaultkvStoreDataPathPrefix})
 	}
 	return &ConfigManager{
 		KVStoreConfigPrefix:   defaultkvStoreConfigPath,
@@ -176,31 +177,31 @@
 func (c *ComponentConfig) MonitorForConfigChange(ctx context.Context) chan *ConfigChangeEvent {
 	key := c.makeConfigPath()
 
-	logger.Debugw("monitoring-for-config-change", log.Fields{"key": key})
+	logger.Debugw(ctx, "monitoring-for-config-change", log.Fields{"key": key})
 
 	c.changeEventChan = make(chan *ConfigChangeEvent, 1)
 
 	c.kvStoreEventChan = c.cManager.Backend.CreateWatch(ctx, key, true)
 
-	go c.processKVStoreWatchEvents()
+	go c.processKVStoreWatchEvents(ctx)
 
 	return c.changeEventChan
 }
 
 // processKVStoreWatchEvents processes the event channel received from the Backend for any ChangeType.
 // It checks whether the EventType is valid; for valid EventTypes it creates a ConfigChangeEvent and sends it on the channel
-func (c *ComponentConfig) processKVStoreWatchEvents() {
+func (c *ComponentConfig) processKVStoreWatchEvents(ctx context.Context) {
 
 	ccKeyPrefix := c.makeConfigPath()
 
-	logger.Debugw("processing-kvstore-event-change", log.Fields{"key-prefix": ccKeyPrefix})
+	logger.Debugw(ctx, "processing-kvstore-event-change", log.Fields{"key-prefix": ccKeyPrefix})
 
 	ccPathPrefix := c.cManager.Backend.PathPrefix + ccKeyPrefix + kvStorePathSeparator
 
 	for watchResp := range c.kvStoreEventChan {
 
 		if watchResp.EventType == kvstore.CONNECTIONDOWN || watchResp.EventType == kvstore.UNKNOWN {
-			logger.Warnw("received-invalid-change-type-in-watch-channel-from-kvstore", log.Fields{"change-type": watchResp.EventType})
+			logger.Warnw(ctx, "received-invalid-change-type-in-watch-channel-from-kvstore", log.Fields{"change-type": watchResp.EventType})
 			continue
 		}
 
@@ -220,7 +221,7 @@
 func (c *ComponentConfig) Retrieve(ctx context.Context, configKey string) (string, error) {
 	key := c.makeConfigPath() + "/" + configKey
 
-	logger.Debugw("retrieving-config", log.Fields{"key": key})
+	logger.Debugw(ctx, "retrieving-config", log.Fields{"key": key})
 
 	if kvpair, err := c.cManager.Backend.Get(ctx, key); err != nil {
 		return "", err
@@ -230,7 +231,7 @@
 		}
 
 		value := strings.Trim(fmt.Sprintf("%s", kvpair.Value), "\"")
-		logger.Debugw("retrieved-config", log.Fields{"key": key, "value": value})
+		logger.Debugw(ctx, "retrieved-config", log.Fields{"key": key, "value": value})
 		return value, nil
 	}
 }
@@ -238,7 +239,7 @@
 func (c *ComponentConfig) RetrieveAll(ctx context.Context) (map[string]string, error) {
 	key := c.makeConfigPath()
 
-	logger.Debugw("retreiving-list", log.Fields{"key": key})
+	logger.Debugw(ctx, "retreiving-list", log.Fields{"key": key})
 
 	data, err := c.cManager.Backend.List(ctx, key)
 	if err != nil {
@@ -261,7 +262,7 @@
 func (c *ComponentConfig) Save(ctx context.Context, configKey string, configValue string) error {
 	key := c.makeConfigPath() + "/" + configKey
 
-	logger.Debugw("saving-config", log.Fields{"key": key, "value": configValue})
+	logger.Debugw(ctx, "saving-config", log.Fields{"key": key, "value": configValue})
 
 	//save the data for update config
 	if err := c.cManager.Backend.Put(ctx, key, configValue); err != nil {
@@ -274,7 +275,7 @@
 	//construct key using makeConfigPath
 	key := c.makeConfigPath() + "/" + configKey
 
-	logger.Debugw("deleting-config", log.Fields{"key": key})
+	logger.Debugw(ctx, "deleting-config", log.Fields{"key": key})
 	//delete the config
 	if err := c.cManager.Backend.Delete(ctx, key); err != nil {
 		return err
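
NewConfigManager now takes a ctx for its own logging, while the ComponentConfig accessors keep their shapes. A usage sketch against a hypothetical component name, persisting one config key through the KV backend:

func persistDefaultLogLevel(ctx context.Context, cm *config.ConfigManager) error {
	// "adapter-example" is illustrative; components use their own name.
	cc := cm.InitComponentConfig("adapter-example", config.ConfigTypeLogLevel)
	return cc.Save(ctx, "default", "DEBUG")
}
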
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/logcontroller.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logcontroller.go
similarity index 73%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/logcontroller.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logcontroller.go
index b00569f..8187edc 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/config/logcontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logcontroller.go
@@ -26,7 +26,7 @@
 	"crypto/md5"
 	"encoding/json"
 	"errors"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 	"os"
 	"sort"
 	"strings"
@@ -51,9 +51,8 @@
 	initialLogLevel     string // Initial default log level set by helm chart
 }
 
-func NewComponentLogController(cm *ConfigManager) (*ComponentLogController, error) {
-
-	logger.Debug("creating-new-component-log-controller")
+func NewComponentLogController(ctx context.Context, cm *ConfigManager) (*ComponentLogController, error) {
+	logger.Debug(ctx, "creating-new-component-log-controller")
 	componentName := os.Getenv("COMPONENT_NAME")
 	if componentName == "" {
 		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
@@ -80,17 +79,17 @@
 // Then, it persists initial default Loglevels into Config Store before
 // starting the loading and processing of all Log Configuration
 func StartLogLevelConfigProcessing(cm *ConfigManager, ctx context.Context) {
-	cc, err := NewComponentLogController(cm)
+	cc, err := NewComponentLogController(ctx, cm)
 	if err != nil {
-		logger.Errorw("unable-to-construct-component-log-controller-instance-for-log-config-monitoring", log.Fields{"error": err})
+		logger.Errorw(ctx, "unable-to-construct-component-log-controller-instance-for-log-config-monitoring", log.Fields{"error": err})
 		return
 	}
 
 	cc.GlobalConfig = cm.InitComponentConfig(globalConfigRootNode, ConfigTypeLogLevel)
-	logger.Debugw("global-log-config", log.Fields{"cc-global-config": cc.GlobalConfig})
+	logger.Debugw(ctx, "global-log-config", log.Fields{"cc-global-config": cc.GlobalConfig})
 
 	cc.componentNameConfig = cm.InitComponentConfig(cc.ComponentName, ConfigTypeLogLevel)
-	logger.Debugw("component-log-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
+	logger.Debugw(ctx, "component-log-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
 
 	cc.persistInitialDefaultLogConfigs(ctx)
 
@@ -105,21 +104,21 @@
 
 	_, err := c.GlobalConfig.Retrieve(ctx, defaultLogLevelKey)
 	if err != nil {
-		logger.Debugw("failed-to-retrieve-global-default-log-config-at-startup", log.Fields{"error": err})
+		logger.Debugw(ctx, "failed-to-retrieve-global-default-log-config-at-startup", log.Fields{"error": err})
 
 		err = c.GlobalConfig.Save(ctx, defaultLogLevelKey, initialGlobalDefaultLogLevelValue)
 		if err != nil {
-			logger.Errorw("failed-to-persist-global-default-log-config-at-startup", log.Fields{"error": err, "loglevel": initialGlobalDefaultLogLevelValue})
+			logger.Errorw(ctx, "failed-to-persist-global-default-log-config-at-startup", log.Fields{"error": err, "loglevel": initialGlobalDefaultLogLevelValue})
 		}
 	}
 
 	_, err = c.componentNameConfig.Retrieve(ctx, defaultLogLevelKey)
 	if err != nil {
-		logger.Debugw("failed-to-retrieve-component-default-log-config-at-startup", log.Fields{"error": err})
+		logger.Debugw(ctx, "failed-to-retrieve-component-default-log-config-at-startup", log.Fields{"error": err})
 
 		err = c.componentNameConfig.Save(ctx, defaultLogLevelKey, c.initialLogLevel)
 		if err != nil {
-			logger.Errorw("failed-to-persist-component-default-log-config-at-startup", log.Fields{"error": err, "loglevel": c.initialLogLevel})
+			logger.Errorw(ctx, "failed-to-persist-component-default-log-config-at-startup", log.Fields{"error": err, "loglevel": c.initialLogLevel})
 		}
 	}
 }
@@ -129,7 +128,7 @@
 func (c *ComponentLogController) persistRegisteredLogPackageList(ctx context.Context) {
 
 	componentMetadataConfig := c.configManager.InitComponentConfig(c.ComponentName, ConfigTypeMetadata)
-	logger.Debugw("component-metadata-config", log.Fields{"component-metadata-config": componentMetadataConfig})
+	logger.Debugw(ctx, "component-metadata-config", log.Fields{"component-metadata-config": componentMetadataConfig})
 
 	packageList := log.GetPackageNames()
 	packageList = append(packageList, defaultLogLevelKey)
@@ -137,12 +136,12 @@
 
 	packageNames, err := json.Marshal(packageList)
 	if err != nil {
-		logger.Errorw("failed-to-marshal-log-package-list-for-storage", log.Fields{"error": err, "packageList": packageList})
+		logger.Errorw(ctx, "failed-to-marshal-log-package-list-for-storage", log.Fields{"error": err, "packageList": packageList})
 		return
 	}
 
 	if err := componentMetadataConfig.Save(ctx, logPackagesListKey, string(packageNames)); err != nil {
-		logger.Errorw("failed-to-persist-component-registered-log-package-list-at-startup", log.Fields{"error": err, "packageNames": packageNames})
+		logger.Errorw(ctx, "failed-to-persist-component-registered-log-package-list-at-startup", log.Fields{"error": err, "packageNames": packageNames})
 	}
 }
 
@@ -155,10 +154,10 @@
 	// Load and apply Log Config for first time
 	initialLogConfig, err := c.buildUpdatedLogConfig(ctx)
 	if err != nil {
-		logger.Warnw("unable-to-load-log-config-at-startup", log.Fields{"error": err})
+		logger.Warnw(ctx, "unable-to-load-log-config-at-startup", log.Fields{"error": err})
 	} else {
-		if err := c.loadAndApplyLogConfig(initialLogConfig); err != nil {
-			logger.Warnw("unable-to-apply-log-config-at-startup", log.Fields{"error": err})
+		if err := c.loadAndApplyLogConfig(ctx, initialLogConfig); err != nil {
+			logger.Warnw(ctx, "unable-to-apply-log-config-at-startup", log.Fields{"error": err})
 		}
 	}
 
@@ -174,25 +173,25 @@
 		case configEvent = <-componentConfigEventChan:
 
 		}
-		logger.Debugw("processing-log-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
+		logger.Debugw(ctx, "processing-log-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
 
 		updatedLogConfig, err := c.buildUpdatedLogConfig(ctx)
 		if err != nil {
-			logger.Warnw("unable-to-fetch-updated-log-config", log.Fields{"error": err})
+			logger.Warnw(ctx, "unable-to-fetch-updated-log-config", log.Fields{"error": err})
 			continue
 		}
 
-		logger.Debugw("applying-updated-log-config", log.Fields{"updated-log-config": updatedLogConfig})
+		logger.Debugw(ctx, "applying-updated-log-config", log.Fields{"updated-log-config": updatedLogConfig})
 
-		if err := c.loadAndApplyLogConfig(updatedLogConfig); err != nil {
-			logger.Warnw("unable-to-load-and-apply-log-config", log.Fields{"error": err})
+		if err := c.loadAndApplyLogConfig(ctx, updatedLogConfig); err != nil {
+			logger.Warnw(ctx, "unable-to-load-and-apply-log-config", log.Fields{"error": err})
 		}
 	}
 
 }
 
 // get active loglevel from the zap logger
-func getActiveLogLevels() map[string]string {
+func getActiveLogLevels(ctx context.Context) map[string]string {
 	loglevels := make(map[string]string)
 
 	// now do the default log level
@@ -204,7 +203,7 @@
 	for _, packageName := range log.GetPackageNames() {
 		level, err := log.GetPackageLogLevel(packageName)
 		if err != nil {
-			logger.Warnw("unable-to-fetch-current-active-loglevel-for-package-name", log.Fields{"package-name": packageName, "error": err})
+			logger.Warnw(ctx, "unable-to-fetch-current-active-loglevel-for-package-name", log.Fields{"package-name": packageName, "error": err})
 			continue
 		}
 
@@ -213,7 +212,7 @@
 		}
 	}
 
-	logger.Debugw("retreived-log-levels-from-zap-logger", log.Fields{"loglevels": loglevels})
+	logger.Debugw(ctx, "retreived-log-levels-from-zap-logger", log.Fields{"loglevels": loglevels})
 
 	return loglevels
 }
@@ -228,16 +227,16 @@
 	// Handle edge cases when global default loglevel is deleted directly from etcd or set to an invalid value
 	// We should use hard-coded initial default value in such cases
 	if globalDefaultLogLevel == "" {
-		logger.Warn("global-default-loglevel-not-found-in-config-store")
+		logger.Warn(ctx, "global-default-loglevel-not-found-in-config-store")
 		globalDefaultLogLevel = initialGlobalDefaultLogLevelValue
 	}
 
 	if _, err := log.StringToLogLevel(globalDefaultLogLevel); err != nil {
-		logger.Warnw("unsupported-loglevel-config-defined-at-global-default", log.Fields{"log-level": globalDefaultLogLevel})
+		logger.Warnw(ctx, "unsupported-loglevel-config-defined-at-global-default", log.Fields{"log-level": globalDefaultLogLevel})
 		globalDefaultLogLevel = initialGlobalDefaultLogLevelValue
 	}
 
-	logger.Debugw("retrieved-global-default-loglevel", log.Fields{"level": globalDefaultLogLevel})
+	logger.Debugw(ctx, "retrieved-global-default-loglevel", log.Fields{"level": globalDefaultLogLevel})
 
 	return globalDefaultLogLevel, nil
 }
@@ -251,7 +250,7 @@
 	effectiveDefaultLogLevel := ""
 	for logConfigKey, logConfigValue := range componentLogConfig {
 		if _, err := log.StringToLogLevel(logConfigValue); err != nil || logConfigKey == "" {
-			logger.Warnw("unsupported-loglevel-config-defined-at-component-context", log.Fields{"package-name": logConfigKey, "log-level": logConfigValue})
+			logger.Warnw(ctx, "unsupported-loglevel-config-defined-at-component-context", log.Fields{"package-name": logConfigKey, "log-level": logConfigValue})
 			delete(componentLogConfig, logConfigKey)
 		} else {
 			if logConfigKey == defaultLogLevelKey {
@@ -268,7 +267,7 @@
 
 	componentLogConfig[defaultLogLevelKey] = effectiveDefaultLogLevel
 
-	logger.Debugw("retrieved-component-log-config", log.Fields{"component-log-level": componentLogConfig})
+	logger.Debugw(ctx, "retrieved-component-log-config", log.Fields{"component-log-level": componentLogConfig})
 
 	return componentLogConfig, nil
 }
@@ -282,7 +281,7 @@
 func (c *ComponentLogController) buildUpdatedLogConfig(ctx context.Context) (map[string]string, error) {
 	globalLogLevel, err := c.getGlobalLogConfig(ctx)
 	if err != nil {
-		logger.Errorw("unable-to-retrieve-global-log-config", log.Fields{"err": err})
+		logger.Errorw(ctx, "unable-to-retrieve-global-log-config", log.Fields{"err": err})
 	}
 
 	componentLogConfig, err := c.getComponentLogConfig(ctx, globalLogLevel)
@@ -302,17 +301,17 @@
 // create hash of loaded configuration using GenerateLogConfigHash
 // if a previous hash is stored, compare the new hash against it
 // if they differ, call updateLogLevels to apply the change
-func (c *ComponentLogController) loadAndApplyLogConfig(logConfig map[string]string) error {
+func (c *ComponentLogController) loadAndApplyLogConfig(ctx context.Context, logConfig map[string]string) error {
 	currentLogHash, err := GenerateLogConfigHash(logConfig)
 	if err != nil {
 		return err
 	}
 
 	if c.logHash != currentLogHash {
-		UpdateLogLevels(logConfig)
+		updateLogLevels(ctx, logConfig)
 		c.logHash = currentLogHash
 	} else {
-		logger.Debug("effective-loglevel-config-same-as-currently-active")
+		logger.Debug(ctx, "effective-loglevel-config-same-as-currently-active")
 	}
 
 	return nil
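
The hash comparison above is what keeps an unchanged configuration from being re-applied on every config event. A sketch of the idea; the md5-over-JSON encoding is an assumption for illustration and GenerateLogConfigHash's actual encoding may differ:

// Sketch: change detection by hashing the effective log config.
func hashLogConfig(logConfig map[string]string) ([16]byte, error) {
	// encoding/json emits map keys in sorted order, so equal maps
	// always produce equal hashes.
	b, err := json.Marshal(logConfig)
	if err != nil {
		return [16]byte{}, err
	}
	return md5.Sum(b), nil
}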
@@ -322,7 +321,7 @@
 // to identify and create map of modified Log Levels of 2 types:
 // - Packages for which log level has been changed
 // - Packages for which log level config has been cleared - set to default log level
-func createModifiedLogLevels(activeLogLevels, updatedLogLevels map[string]string) map[string]string {
+func createModifiedLogLevels(ctx context.Context, activeLogLevels, updatedLogLevels map[string]string) map[string]string {
 	defaultLevel := updatedLogLevels[defaultLogLevelKey]
 
 	modifiedLogLevels := make(map[string]string)
@@ -339,7 +338,7 @@
 	// Log warnings for all invalid packages for which log config has been set
 	for key, value := range updatedLogLevels {
 		if _, exist := activeLogLevels[key]; !exist {
-			logger.Warnw("ignoring-loglevel-set-for-invalid-package", log.Fields{"package": key, "log-level": value})
+			logger.Warnw(ctx, "ignoring-loglevel-set-for-invalid-package", log.Fields{"package": key, "log-level": value})
 		}
 	}
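
Condensed, the comparison this function performs is a two-way diff of the active and updated maps: a package whose level changed carries the new level, and a package cleared from the updated config falls back to the default. A sketch, not the vendored code verbatim:

// Sketch of the active-vs-updated comparison described above.
func diffLogLevels(active, updated map[string]string, defaultLevel string) map[string]string {
	modified := make(map[string]string)
	for pkg, activeLevel := range active {
		if updatedLevel, ok := updated[pkg]; ok {
			if updatedLevel != activeLevel {
				modified[pkg] = updatedLevel // explicit level change
			}
		} else if activeLevel != defaultLevel {
			modified[pkg] = defaultLevel // config cleared: revert to default
		}
	}
	return modified
}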
 
@@ -349,18 +348,18 @@
 // updateLogLevels updates the loglevels for the component
 // retrieve active configuration from logger
 // compare with entries one by one and apply
-func UpdateLogLevels(updatedLogConfig map[string]string) {
+func updateLogLevels(ctx context.Context, updatedLogConfig map[string]string) {
 
-	activeLogLevels := getActiveLogLevels()
-	changedLogLevels := createModifiedLogLevels(activeLogLevels, updatedLogConfig)
+	activeLogLevels := getActiveLogLevels(ctx)
+	changedLogLevels := createModifiedLogLevels(ctx, activeLogLevels, updatedLogConfig)
 
 	// If no changed log levels are found, just return. This may happen when an invalid package is configured
 	if len(changedLogLevels) == 0 {
-		logger.Debug("no-change-in-effective-loglevel-config")
+		logger.Debug(ctx, "no-change-in-effective-loglevel-config")
 		return
 	}
 
-	logger.Debugw("applying-log-level-for-modified-packages", log.Fields{"changed-log-levels": changedLogLevels})
+	logger.Debugw(ctx, "applying-log-level-for-modified-packages", log.Fields{"changed-log-levels": changedLogLevels})
 	for key, level := range changedLogLevels {
 		if key == defaultLogLevelKey {
 			if l, err := log.StringToLogLevel(level); err == nil {
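
The hunk ends inside the apply loop. Conceptually the remainder converts each string level with log.StringToLogLevel and hands it to the logger's setters; a hedged sketch (the use of SetDefaultLogLevel/SetPackageLogLevel here is inferred, not shown in this hunk):

// Sketch of the rest of the apply loop.
func applyLogLevels(changedLogLevels map[string]string) {
	for key, level := range changedLogLevels {
		l, err := log.StringToLogLevel(level)
		if err != nil {
			continue // unsupported level strings were filtered out earlier
		}
		if key == defaultLogLevelKey {
			log.SetDefaultLogLevel(l)
		} else {
			log.SetPackageLogLevel(key, l)
		}
	}
}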
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logfeaturescontroller.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logfeaturescontroller.go
new file mode 100644
index 0000000..353ae5c
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/logfeaturescontroller.go
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package config
+
+import (
+	"context"
+	"errors"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"os"
+	"strings"
+)
+
+const (
+	defaultTracingStatusKey        = "trace_publish"   // kvstore key containing tracing configuration status
+	defaultLogCorrelationStatusKey = "log_correlation" // kvstore key containing log correlation configuration status
+)
+
+// ComponentLogFeaturesController represents configuration for logging-related features of Tracing and Log
+// Correlation of a specific Voltha component.
+type ComponentLogFeaturesController struct {
+	ComponentName               string
+	componentNameConfig         *ComponentConfig
+	configManager               *ConfigManager
+	initialTracingStatus        bool // Initial default tracing status set by helm chart
+	initialLogCorrelationStatus bool // Initial default log correlation status set by helm chart
+}
+
+func NewComponentLogFeaturesController(ctx context.Context, cm *ConfigManager) (*ComponentLogFeaturesController, error) {
+	logger.Debug(ctx, "creating-new-component-log-features-controller")
+	componentName := os.Getenv("COMPONENT_NAME")
+	if componentName == "" {
+		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+	}
+
+	tracingStatus := log.GetGlobalLFM().GetTracePublishingStatus()
+	logCorrelationStatus := log.GetGlobalLFM().GetLogCorrelationStatus()
+
+	return &ComponentLogFeaturesController{
+		ComponentName:               componentName,
+		componentNameConfig:         nil,
+		configManager:               cm,
+		initialTracingStatus:        tracingStatus,
+		initialLogCorrelationStatus: logCorrelationStatus,
+	}, nil
+
+}
+
+// StartLogFeaturesConfigProcessing persists initial config of Log Features into Config Store before
+// starting the loading and processing of Configuration updates
+func StartLogFeaturesConfigProcessing(cm *ConfigManager, ctx context.Context) {
+	cc, err := NewComponentLogFeaturesController(ctx, cm)
+	if err != nil {
+		logger.Errorw(ctx, "unable-to-construct-component-log-features-controller-instance-for-monitoring", log.Fields{"error": err})
+		return
+	}
+
+	cc.componentNameConfig = cm.InitComponentConfig(cc.ComponentName, ConfigTypeLogFeatures)
+	logger.Debugw(ctx, "component-log-features-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
+
+	cc.persistInitialLogFeaturesConfigs(ctx)
+
+	cc.processLogFeaturesConfig(ctx)
+}
+
+// persistInitialLogFeaturesConfigs persists the initial status of the Log Correlation and Tracing features
+// (as set from the command line) into the config store (etcd kvstore), if not already set
+func (cc *ComponentLogFeaturesController) persistInitialLogFeaturesConfigs(ctx context.Context) {
+
+	_, err := cc.componentNameConfig.Retrieve(ctx, defaultTracingStatusKey)
+	if err != nil {
+		statusString := "DISABLED"
+		if cc.initialTracingStatus {
+			statusString = "ENABLED"
+		}
+		err = cc.componentNameConfig.Save(ctx, defaultTracingStatusKey, statusString)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-persist-component-initial-tracing-status-at-startup", log.Fields{"error": err, "tracingstatus": statusString})
+		}
+	}
+
+	_, err = cc.componentNameConfig.Retrieve(ctx, defaultLogCorrelationStatusKey)
+	if err != nil {
+		statusString := "DISABLED"
+		if cc.initialLogCorrelationStatus {
+			statusString = "ENABLED"
+		}
+		err = cc.componentNameConfig.Save(ctx, defaultLogCorrelationStatusKey, statusString)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-persist-component-initial-log-correlation-status-at-startup", log.Fields{"error": err, "logcorrelationstatus": statusString})
+		}
+	}
+}
+
+// processLogFeaturesConfig first loads and applies the log features configuration. It then waits for any changes
+// made to the configuration in the config store (etcd) and applies them
+func (cc *ComponentLogFeaturesController) processLogFeaturesConfig(ctx context.Context) {
+
+	// Load and apply Tracing Status and log correlation status for first time
+	cc.loadAndApplyTracingStatusUpdate(ctx)
+	cc.loadAndApplyLogCorrelationStatusUpdate(ctx)
+
+	componentConfigEventChan := cc.componentNameConfig.MonitorForConfigChange(ctx)
+
+	// process the change events received on the channel
+	var configEvent *ConfigChangeEvent
+	for {
+		select {
+		case <-ctx.Done():
+			return
+
+		case configEvent = <-componentConfigEventChan:
+			logger.Debugw(ctx, "processing-log-features-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
+
+			if strings.HasSuffix(configEvent.ConfigAttribute, defaultTracingStatusKey) {
+				cc.loadAndApplyTracingStatusUpdate(ctx)
+			} else if strings.HasSuffix(configEvent.ConfigAttribute, defaultLogCorrelationStatusKey) {
+				cc.loadAndApplyLogCorrelationStatusUpdate(ctx)
+			}
+		}
+	}
+
+}
+
+func (cc *ComponentLogFeaturesController) loadAndApplyTracingStatusUpdate(ctx context.Context) {
+
+	desiredTracingStatus, err := cc.componentNameConfig.Retrieve(ctx, defaultTracingStatusKey)
+	if err != nil || desiredTracingStatus == "" {
+		logger.Warn(ctx, "unable-to-retrieve-tracing-status-from-config-store")
+		return
+	}
+
+	if desiredTracingStatus != "ENABLED" && desiredTracingStatus != "DISABLED" {
+		logger.Warnw(ctx, "unsupported-tracing-status-configured-in-config-store", log.Fields{"failed-tracing-status": desiredTracingStatus, "tracing-status": log.GetGlobalLFM().GetTracePublishingStatus()})
+		return
+	}
+
+	logger.Debugw(ctx, "retrieved-tracing-status", log.Fields{"tracing-status": desiredTracingStatus})
+
+	log.GetGlobalLFM().SetTracePublishingStatus(desiredTracingStatus == "ENABLED")
+}
+
+func (cc *ComponentLogFeaturesController) loadAndApplyLogCorrelationStatusUpdate(ctx context.Context) {
+
+	desiredLogCorrelationStatus, err := cc.componentNameConfig.Retrieve(ctx, defaultLogCorrelationStatusKey)
+	if err != nil || desiredLogCorrelationStatus == "" {
+		logger.Warn(ctx, "unable-to-retrieve-log-correlation-status-from-config-store")
+		return
+	}
+
+	if desiredLogCorrelationStatus != "ENABLED" && desiredLogCorrelationStatus != "DISABLED" {
+		logger.Warnw(ctx, "unsupported-log-correlation-status-configured-in-config-store", log.Fields{"failed-log-correlation-status": desiredLogCorrelationStatus, "log-correlation-status": log.GetGlobalLFM().GetLogCorrelationStatus()})
+		return
+	}
+
+	logger.Debugw(ctx, "retrieved-log-correlation-status", log.Fields{"log-correlation-status": desiredLogCorrelationStatus})
+
+	log.GetGlobalLFM().SetLogCorrelationStatus(desiredLogCorrelationStatus == "ENABLED")
+}
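
Together with the log-level controller above, a component would typically launch both monitors once during startup. A sketch, assuming cm is the component's *ConfigManager (note that both entry points take cm before ctx):

// Sketch: wiring both config monitors into component startup.
func startConfigMonitoring(ctx context.Context, cm *ConfigManager) {
	// Each call blocks while processing config-change events,
	// so run both in their own goroutines for the life of ctx.
	go StartLogLevelConfigProcessing(cm, ctx)
	go StartLogFeaturesConfigProcessing(cm, ctx)
}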
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go
similarity index 64%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go
index 1e23a0f..d6867a5 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go
@@ -20,10 +20,11 @@
 	"context"
 	"errors"
 	"fmt"
+	"sync"
 	"time"
 
-	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -40,14 +41,15 @@
 	Timeout                 time.Duration
 	Address                 string
 	PathPrefix              string
-	alive                   bool          // Is this backend connection alive?
+	alive                   bool // Is this backend connection alive?
+	livenessMutex           sync.Mutex
 	liveness                chan bool     // channel to post alive state
 	LivenessChannelInterval time.Duration // regularly push alive state beyond this interval
 	lastLivenessTime        time.Time     // Instant of last alive state push
 }
 
 // NewBackend creates a new instance of a Backend structure
-func NewBackend(storeType string, address string, timeout time.Duration, pathPrefix string) *Backend {
+func NewBackend(ctx context.Context, storeType string, address string, timeout time.Duration, pathPrefix string) *Backend {
 	var err error
 
 	b := &Backend{
@@ -59,8 +61,8 @@
 		alive:                   false, // connection considered down at start
 	}
 
-	if b.Client, err = b.newClient(address, timeout); err != nil {
-		logger.Errorw("failed-to-create-kv-client",
+	if b.Client, err = b.newClient(ctx, address, timeout); err != nil {
+		logger.Errorw(ctx, "failed-to-create-kv-client",
 			log.Fields{
 				"type": storeType, "address": address,
 				"timeout": timeout, "prefix": pathPrefix,
@@ -71,34 +73,35 @@
 	return b
 }
 
-func (b *Backend) newClient(address string, timeout time.Duration) (kvstore.Client, error) {
+func (b *Backend) newClient(ctx context.Context, address string, timeout time.Duration) (kvstore.Client, error) {
 	switch b.StoreType {
 	case "consul":
-		return kvstore.NewConsulClient(address, timeout)
+		return kvstore.NewConsulClient(ctx, address, timeout)
 	case "etcd":
-		return kvstore.NewEtcdClient(address, timeout, log.WarnLevel)
+		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
 	}
 	return nil, errors.New("unsupported-kv-store")
 }
 
-func (b *Backend) makePath(key string) string {
+func (b *Backend) makePath(ctx context.Context, key string) string {
 	path := fmt.Sprintf("%s/%s", b.PathPrefix, key)
 	return path
 }
 
-func (b *Backend) updateLiveness(alive bool) {
+func (b *Backend) updateLiveness(ctx context.Context, alive bool) {
 	// Periodically push stream of liveness data to the channel,
 	// so that in a live state, the core does not timeout and
 	// send a forced liveness message. Push alive state if the
 	// last push to channel was beyond livenessChannelInterval
+	b.livenessMutex.Lock()
+	defer b.livenessMutex.Unlock()
 	if b.liveness != nil {
-
 		if b.alive != alive {
-			logger.Debug("update-liveness-channel-reason-change")
+			logger.Debug(ctx, "update-liveness-channel-reason-change")
 			b.liveness <- alive
 			b.lastLivenessTime = time.Now()
 		} else if time.Since(b.lastLivenessTime) > b.LivenessChannelInterval {
-			logger.Debug("update-liveness-channel-reason-interval")
+			logger.Debug(ctx, "update-liveness-channel-reason-interval")
 			b.liveness <- alive
 			b.lastLivenessTime = time.Now()
 		}
@@ -106,7 +109,7 @@
 
 	// Emit log message only for alive state change
 	if b.alive != alive {
-		logger.Debugw("change-kvstore-alive-status", log.Fields{"alive": alive})
+		logger.Debugw(ctx, "change-kvstore-alive-status", log.Fields{"alive": alive})
 		b.alive = alive
 	}
 }
@@ -115,9 +118,9 @@
 // post on Liveness channel
 func (b *Backend) PerformLivenessCheck(ctx context.Context) bool {
 	alive := b.Client.IsConnectionUp(ctx)
-	logger.Debugw("kvstore-liveness-check-result", log.Fields{"alive": alive})
+	logger.Debugw(ctx, "kvstore-liveness-check-result", log.Fields{"alive": alive})
 
-	b.updateLiveness(alive)
+	b.updateLiveness(ctx, alive)
 	return alive
 }
 
@@ -126,16 +129,12 @@
 // or not the connection is still Live. This channel is then picked up
 // by the service (i.e. rw_core / ro_core) to update readiness status
 // and/or take other actions.
-func (b *Backend) EnableLivenessChannel() chan bool {
-	logger.Debug("enable-kvstore-liveness-channel")
-
+func (b *Backend) EnableLivenessChannel(ctx context.Context) chan bool {
+	logger.Debug(ctx, "enable-kvstore-liveness-channel")
+	b.livenessMutex.Lock()
+	defer b.livenessMutex.Unlock()
 	if b.liveness == nil {
-		logger.Debug("create-kvstore-liveness-channel")
-
-		// Channel size of 10 to avoid any possibility of blocking in Load conditions
 		b.liveness = make(chan bool, 10)
-
-		// Post initial alive state
 		b.liveness <- b.alive
 		b.lastLivenessTime = time.Now()
 	}
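
On the consuming side, a service reads this channel to drive its readiness state. A sketch; updateReadiness is a hypothetical callback supplied by the service:

// Sketch: consuming the backend liveness channel.
func watchKvLiveness(ctx context.Context, b *Backend, updateReadiness func(bool)) {
	ch := b.EnableLivenessChannel(ctx)
	for {
		select {
		case <-ctx.Done():
			return
		case alive := <-ch:
			// Mirror the backend's alive state into the service's probe.
			updateReadiness(alive)
		}
	}
}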
@@ -144,7 +143,7 @@
 }
 
 // Extract Alive status of Kvstore based on type of error
-func (b *Backend) isErrorIndicatingAliveKvstore(err error) bool {
+func (b *Backend) isErrorIndicatingAliveKvstore(ctx context.Context, err error) bool {
 	// Alive unless observed an error indicating so
 	alive := true
 
@@ -182,64 +181,82 @@
 
 // List retrieves one or more items that match the specified key
 func (b *Backend) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
-	formattedPath := b.makePath(key)
-	logger.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath})
+	span, ctx := log.CreateChildSpan(ctx, "etcd-list")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "listing-key", log.Fields{"key": key, "path": formattedPath})
 
 	pair, err := b.Client.List(ctx, formattedPath)
 
-	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
 
 	return pair, err
 }
 
 // Get retrieves an item that matches the specified key
 func (b *Backend) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
-	formattedPath := b.makePath(key)
-	logger.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath})
+	span, ctx := log.CreateChildSpan(ctx, "etcd-get")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "getting-key", log.Fields{"key": key, "path": formattedPath})
 
 	pair, err := b.Client.Get(ctx, formattedPath)
 
-	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
 
 	return pair, err
 }
 
 // Put stores an item value under the specified key
 func (b *Backend) Put(ctx context.Context, key string, value interface{}) error {
-	formattedPath := b.makePath(key)
-	logger.Debugw("putting-key", log.Fields{"key": key, "path": formattedPath})
+	span, ctx := log.CreateChildSpan(ctx, "etcd-put")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "putting-key", log.Fields{"key": key, "path": formattedPath})
 
 	err := b.Client.Put(ctx, formattedPath, value)
 
-	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
 
 	return err
 }
 
 // Delete removes an item under the specified key
 func (b *Backend) Delete(ctx context.Context, key string) error {
-	formattedPath := b.makePath(key)
-	logger.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath})
+	span, ctx := log.CreateChildSpan(ctx, "etcd-delete")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "deleting-key", log.Fields{"key": key, "path": formattedPath})
 
 	err := b.Client.Delete(ctx, formattedPath)
 
-	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
 
 	return err
 }
 
 // CreateWatch starts watching events for the specified key
 func (b *Backend) CreateWatch(ctx context.Context, key string, withPrefix bool) chan *kvstore.Event {
-	formattedPath := b.makePath(key)
-	logger.Debugw("creating-key-watch", log.Fields{"key": key, "path": formattedPath})
+	span, ctx := log.CreateChildSpan(ctx, "etcd-create-watch")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "creating-key-watch", log.Fields{"key": key, "path": formattedPath})
 
 	return b.Client.Watch(ctx, formattedPath, withPrefix)
 }
 
 // DeleteWatch stops watching events for the specified key
-func (b *Backend) DeleteWatch(key string, ch chan *kvstore.Event) {
-	formattedPath := b.makePath(key)
-	logger.Debugw("deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+func (b *Backend) DeleteWatch(ctx context.Context, key string, ch chan *kvstore.Event) {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-delete-watch")
+	defer span.Finish()
 
-	b.Client.CloseWatch(formattedPath, ch)
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+
+	b.Client.CloseWatch(ctx, formattedPath, ch)
 }
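
Each KV operation above now opens a child span, so etcd access shows up in distributed traces whenever a trace is active on the context. The same pattern can wrap any unit of work; a sketch with an illustrative operation name and key:

// Sketch of the span-per-operation pattern used above.
func tracedKvWork(ctx context.Context, b *Backend) error {
	// The child span ties this work to whatever trace is already on ctx.
	span, ctx := log.CreateChildSpan(ctx, "demo-kv-transaction")
	defer span.Finish()

	if err := b.Put(ctx, "service/voltha/demo", "value"); err != nil {
		return err
	}
	_, err := b.Get(ctx, "service/voltha/demo")
	return err
}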
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/common.go
similarity index 83%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/common.go
index 1cf2e1c..25cddf5 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/common.go
@@ -16,15 +16,15 @@
 package db
 
 import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "db"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go
similarity index 96%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go
index 158e626..480d476 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go
@@ -88,6 +88,6 @@
 	AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error
 	ReleaseLock(lockName string) error
 	IsConnectionUp(ctx context.Context) bool // timeout in seconds
-	CloseWatch(key string, ch chan *Event)
-	Close()
+	CloseWatch(ctx context.Context, key string, ch chan *Event)
+	Close(ctx context.Context)
 }
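
With a context now threaded through CloseWatch and Close, a typical teardown under the v4 interface looks like this sketch:

// Sketch: stopping a watch, then releasing the client.
func teardown(ctx context.Context, c Client, key string, events chan *Event) {
	c.CloseWatch(ctx, key, events) // stop event production first
	c.Close(ctx)                   // then drop the store connection
}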
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/common.go
similarity index 83%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/common.go
index aa7aeb0..99c603d 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/common.go
@@ -16,15 +16,15 @@
 package kvstore
 
 import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kvstore"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/consulclient.go
similarity index 81%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/consulclient.go
index d2544dd..2593608 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/consulclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/consulclient.go
@@ -19,7 +19,7 @@
 	"bytes"
 	"context"
 	"errors"
-	log "github.com/opencord/voltha-lib-go/v3/pkg/log"
+	log "github.com/opencord/voltha-lib-go/v4/pkg/log"
 	"sync"
 	"time"
 	//log "ciena.com/coordinator/common"
@@ -44,14 +44,13 @@
 }
 
 // NewConsulClient returns a new client for the Consul KV store
-func NewConsulClient(addr string, timeout time.Duration) (*ConsulClient, error) {
-
+func NewConsulClient(ctx context.Context, addr string, timeout time.Duration) (*ConsulClient, error) {
 	config := consulapi.DefaultConfig()
 	config.Address = addr
 	config.WaitTime = timeout
 	consul, err := consulapi.NewClient(config)
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 
@@ -63,7 +62,7 @@
 
 // IsConnectionUp returns whether the connection to the Consul KV store is up
 func (c *ConsulClient) IsConnectionUp(ctx context.Context) bool {
-	logger.Error("Unimplemented function")
+	logger.Error(ctx, "Unimplemented function")
 	return false
 }
 
@@ -80,7 +79,7 @@
 	// For now we ignore meta data
 	kvps, _, err := kv.List(key, &queryOptions)
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 	m := make(map[string]*KVPair)
@@ -103,7 +102,7 @@
 	// For now we ignore meta data
 	kvp, _, err := kv.Get(key, &queryOptions)
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 	if kvp != nil {
@@ -122,7 +121,7 @@
 	var val []byte
 	var er error
 	if val, er = ToByte(value); er != nil {
-		logger.Error(er)
+		logger.Error(ctx, er)
 		return er
 	}
 
@@ -134,7 +133,7 @@
 	defer c.writeLock.Unlock()
 	_, err := kv.Put(&kvp, &writeOptions)
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return err
 	}
 	return nil
@@ -149,26 +148,26 @@
 	defer c.writeLock.Unlock()
 	_, err := kv.Delete(key, &writeOptions)
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return err
 	}
 	return nil
 }
 
-func (c *ConsulClient) deleteSession() {
+func (c *ConsulClient) deleteSession(ctx context.Context) {
 	if c.sessionID != "" {
-		logger.Debug("cleaning-up-session")
+		logger.Debug(ctx, "cleaning-up-session")
 		session := c.consul.Session()
 		_, err := session.Destroy(c.sessionID, nil)
 		if err != nil {
-			logger.Errorw("error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
+			logger.Errorw(ctx, "error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
 		}
 	}
 	c.sessionID = ""
 	c.session = nil
 }
 
-func (c *ConsulClient) createSession(ttl time.Duration, retries int) (*consulapi.Session, string, error) {
+func (c *ConsulClient) createSession(ctx context.Context, ttl time.Duration, retries int) (*consulapi.Session, string, error) {
 	session := c.consul.Session()
 	entry := &consulapi.SessionEntry{
 		Behavior: consulapi.SessionBehaviorDelete,
@@ -178,17 +177,17 @@
 	for {
 		id, meta, err := session.Create(entry, nil)
 		if err != nil {
-			logger.Errorw("create-session-error", log.Fields{"error": err})
+			logger.Errorw(ctx, "create-session-error", log.Fields{"error": err})
 			if retries == 0 {
 				return nil, "", err
 			}
 		} else if meta.RequestTime == 0 {
-			logger.Errorw("create-session-bad-meta-data", log.Fields{"meta-data": meta})
+			logger.Errorw(ctx, "create-session-bad-meta-data", log.Fields{"meta-data": meta})
 			if retries == 0 {
 				return nil, "", errors.New("bad-meta-data")
 			}
 		} else if id == "" {
-			logger.Error("create-session-nil-id")
+			logger.Error(ctx, "create-session-nil-id")
 			if retries == 0 {
 				return nil, "", errors.New("ID-nil")
 			}
@@ -199,7 +198,7 @@
 		if retries > 0 {
 			retries--
 		}
-		logger.Debug("retrying-session-create-after-a-second-delay")
+		logger.Debug(ctx, "retrying-session-create-after-a-second-delay")
 		time.Sleep(time.Duration(1) * time.Second)
 	}
 }
@@ -226,30 +225,30 @@
 	var val []byte
 	var er error
 	if val, er = ToByte(value); er != nil {
-		logger.Error(er)
+		logger.Error(ctx, er)
 		return nil, er
 	}
 
 	// Cleanup any existing session and recreate new ones.  A key is reserved against a session
 	if c.sessionID != "" {
-		c.deleteSession()
+		c.deleteSession(ctx)
 	}
 
 	// Clear session if reservation is not successful
 	reservationSuccessful := false
 	defer func() {
 		if !reservationSuccessful {
-			logger.Debug("deleting-session")
-			c.deleteSession()
+			logger.Debug(ctx, "deleting-session")
+			c.deleteSession(ctx)
 		}
 	}()
 
-	session, sessionID, err := c.createSession(ttl, -1)
+	session, sessionID, err := c.createSession(ctx, ttl, -1)
 	if err != nil {
-		logger.Errorw("no-session-created", log.Fields{"error": err})
+		logger.Errorw(ctx, "no-session-created", log.Fields{"error": err})
 		return "", errors.New("no-session-created")
 	}
-	logger.Debugw("session-created", log.Fields{"session-id": sessionID})
+	logger.Debugw(ctx, "session-created", log.Fields{"session-id": sessionID})
 	c.sessionID = sessionID
 	c.session = session
 
@@ -258,11 +257,11 @@
 	kvp := consulapi.KVPair{Key: key, Value: val, Session: c.sessionID}
 	result, _, err := kv.Acquire(&kvp, nil)
 	if err != nil {
-		logger.Errorw("error-acquiring-keys", log.Fields{"error": err})
+		logger.Errorw(ctx, "error-acquiring-keys", log.Fields{"error": err})
 		return nil, err
 	}
 
-	logger.Debugw("key-acquired", log.Fields{"key": key, "status": result})
+	logger.Debugw(ctx, "key-acquired", log.Fields{"key": key, "status": result})
 
 	// Irrespective whether we were successful in acquiring the key, let's read it back and see if it's us.
 	m, err := c.Get(ctx, key)
@@ -270,7 +269,7 @@
 		return nil, err
 	}
 	if m != nil {
-		logger.Debugw("response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
+		logger.Debugw(ctx, "response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
 		if m.Key == key && isEqual(m.Value, value) {
 			// My reservation is successful - register it.  For now, support is only for 1 reservation per key
 			// per session.
@@ -300,11 +299,11 @@
 		kvp = consulapi.KVPair{Key: key, Value: value.([]byte), Session: c.sessionID}
 		result, _, err = kv.Release(&kvp, nil)
 		if err != nil {
-			logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
+			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key, "error": err})
 			return err
 		}
 		if !result {
-			logger.Errorw("cannot-release-reservation", log.Fields{"key": key})
+			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key})
 		}
 		delete(c.keyReservations, key)
 	}
@@ -384,21 +383,21 @@
 
 // CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
 // may be multiple listeners on the same key.  The previously created channel serves as a key
-func (c *ConsulClient) CloseWatch(key string, ch chan *Event) {
+func (c *ConsulClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
 	// First close the context
 	var ok bool
 	var watchedChannelsContexts []*channelContextMap
 	c.writeLock.Lock()
 	defer c.writeLock.Unlock()
 	if watchedChannelsContexts, ok = c.watchedChannelsContext[key]; !ok {
-		logger.Errorw("key-has-no-watched-context-or-channel", log.Fields{"key": key})
+		logger.Errorw(ctx, "key-has-no-watched-context-or-channel", log.Fields{"key": key})
 		return
 	}
 	// Look for the channels
 	var pos = -1
 	for i, chCtxMap := range watchedChannelsContexts {
 		if chCtxMap.channel == ch {
-			logger.Debug("channel-found")
+			logger.Debug(ctx, "channel-found")
 			chCtxMap.cancel()
 			//close the channel
 			close(ch)
@@ -410,7 +409,7 @@
 	if pos >= 0 {
 		c.watchedChannelsContext[key] = append(c.watchedChannelsContext[key][:pos], c.watchedChannelsContext[key][pos+1:]...)
 	}
-	logger.Debugw("watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
+	logger.Debugw(ctx, "watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
 }
 
 func (c *ConsulClient) isKVEqual(kv1 *consulapi.KVPair, kv2 *consulapi.KVPair) bool {
@@ -430,10 +429,10 @@
 	return true
 }
 
-func (c *ConsulClient) listenForKeyChange(watchContext context.Context, key string, ch chan *Event) {
-	logger.Debugw("start-watching-channel", log.Fields{"key": key, "channel": ch})
+func (c *ConsulClient) listenForKeyChange(ctx context.Context, key string, ch chan *Event) {
+	logger.Debugw(ctx, "start-watching-channel", log.Fields{"key": key, "channel": ch})
 
-	defer c.CloseWatch(key, ch)
+	defer c.CloseWatch(ctx, key, ch)
 	kv := c.consul.KV()
 	var queryOptions consulapi.QueryOptions
 	queryOptions.WaitTime = defaultKVGetTimeout
@@ -441,7 +440,7 @@
 	// Get the existing value, if any
 	previousKVPair, meta, err := kv.Get(key, &queryOptions)
 	if err != nil {
-		logger.Debug(err)
+		logger.Debug(ctx, err)
 	}
 	lastIndex := meta.LastIndex
 
@@ -449,37 +448,37 @@
 	//var waitOptions consulapi.QueryOptions
 	var pair *consulapi.KVPair
 	//watchContext, _ := context.WithCancel(context.Background())
-	waitOptions := queryOptions.WithContext(watchContext)
+	waitOptions := queryOptions.WithContext(ctx)
 	for {
 		//waitOptions = consulapi.QueryOptions{WaitIndex: lastIndex}
 		waitOptions.WaitIndex = lastIndex
 		pair, meta, err = kv.Get(key, waitOptions)
 		select {
-		case <-watchContext.Done():
-			logger.Debug("done-event-received-exiting")
+		case <-ctx.Done():
+			logger.Debug(ctx, "done-event-received-exiting")
 			return
 		default:
 			if err != nil {
-				logger.Warnw("error-from-watch", log.Fields{"error": err})
+				logger.Warnw(ctx, "error-from-watch", log.Fields{"error": err})
 				ch <- NewEvent(CONNECTIONDOWN, key, []byte(""), -1)
 			} else {
-				logger.Debugw("index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
+				logger.Debugw(ctx, "index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
 			}
 		}
 		if err != nil {
-			logger.Debug(err)
+			logger.Debug(ctx, err)
 			// On error, block for 10 milliseconds to prevent endless loop
 			time.Sleep(10 * time.Millisecond)
 		} else if meta.LastIndex <= lastIndex {
-			logger.Info("no-index-change-or-negative")
+			logger.Info(ctx, "no-index-change-or-negative")
 		} else {
-			logger.Debugw("update-received", log.Fields{"pair": pair})
+			logger.Debugw(ctx, "update-received", log.Fields{"pair": pair})
 			if pair == nil {
 				ch <- NewEvent(DELETE, key, []byte(""), -1)
 			} else if !c.isKVEqual(pair, previousKVPair) {
 				// Push the change onto the channel if the data has changed
 				// For now just assume it's a PUT change
-				logger.Debugw("pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
+				logger.Debugw(ctx, "pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
 				ch <- NewEvent(PUT, pair.Key, pair.Value, -1)
 			}
 			previousKVPair = pair
@@ -489,7 +488,7 @@
 }
 
 // Close closes the KV store client
-func (c *ConsulClient) Close() {
+func (c *ConsulClient) Close(ctx context.Context) {
 	var writeOptions consulapi.WriteOptions
 	// Inform any goroutine it's time to say goodbye.
 	c.writeLock.Lock()
@@ -500,7 +499,7 @@
 
 	// Clear the sessionID
 	if _, err := c.consul.Session().Destroy(c.sessionID, &writeOptions); err != nil {
-		logger.Errorw("error-closing-client", log.Fields{"error": err})
+		logger.Errorw(ctx, "error-closing-client", log.Fields{"error": err})
 	}
 }
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go
similarity index 87%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go
index 8d4a462..aa5adbf 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/etcdclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go
@@ -22,7 +22,7 @@
 	"sync"
 	"time"
 
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 	v3Client "go.etcd.io/etcd/clientv3"
 	v3Concurrency "go.etcd.io/etcd/clientv3/concurrency"
 	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
@@ -40,7 +40,7 @@
 }
 
 // NewEtcdClient returns a new client for the Etcd KV store
-func NewEtcdClient(addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+func NewEtcdClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
 	logconfig := log.ConstructZapConfig(log.JSON, level, log.Fields{})
 
 	c, err := v3Client.New(v3Client.Config{
@@ -49,7 +49,7 @@
 		LogConfig:   &logconfig,
 	})
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 
@@ -77,7 +77,7 @@
 func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
 	resp, err := c.ectdAPI.Get(ctx, key, v3Client.WithPrefix())
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 	m := make(map[string]*KVPair)
@@ -94,7 +94,7 @@
 	resp, err := c.ectdAPI.Get(ctx, key)
 
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 	for _, ev := range resp.Kvs {
@@ -131,13 +131,13 @@
 	if err != nil {
 		switch err {
 		case context.Canceled:
-			logger.Warnw("context-cancelled", log.Fields{"error": err})
+			logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
 		case context.DeadlineExceeded:
-			logger.Warnw("context-deadline-exceeded", log.Fields{"error": err})
+			logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err})
 		case v3rpcTypes.ErrEmptyKey:
-			logger.Warnw("etcd-client-error", log.Fields{"error": err})
+			logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
 		default:
-			logger.Warnw("bad-endpoints", log.Fields{"error": err})
+			logger.Warnw(ctx, "bad-endpoints", log.Fields{"error": err})
 		}
 		return err
 	}
@@ -150,10 +150,10 @@
 
 	// delete the key
 	if _, err := c.ectdAPI.Delete(ctx, key); err != nil {
-		logger.Errorw("failed-to-delete-key", log.Fields{"key": key, "error": err})
+		logger.Errorw(ctx, "failed-to-delete-key", log.Fields{"key": key, "error": err})
 		return err
 	}
-	logger.Debugw("key(s)-deleted", log.Fields{"key": key})
+	logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
 	return nil
 }
 
@@ -172,7 +172,7 @@
 
 	resp, err := c.ectdAPI.Grant(ctx, int64(ttl.Seconds()))
 	if err != nil {
-		logger.Error(err)
+		logger.Error(ctx, err)
 		return nil, err
 	}
 	// Register the lease id
@@ -185,7 +185,7 @@
 	defer func() {
 		if !reservationSuccessful {
 			if err = c.ReleaseReservation(context.Background(), key); err != nil {
-				logger.Error("cannot-release-lease")
+				logger.Error(ctx, "cannot-release-lease")
 			}
 		}
 	}()
@@ -240,7 +240,7 @@
 	for key, leaseID := range c.keyReservations {
 		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
 		if err != nil {
-			logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
+			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key, "error": err})
 			return err
 		}
 		delete(c.keyReservations, key)
@@ -251,7 +251,7 @@
 // ReleaseReservation releases reservation for a specific key.
 func (c *EtcdClient) ReleaseReservation(ctx context.Context, key string) error {
 	// Get the leaseid using the key
-	logger.Debugw("Release-reservation", log.Fields{"key": key})
+	logger.Debugw(ctx, "Release-reservation", log.Fields{"key": key})
 	var ok bool
 	var leaseID *v3Client.LeaseID
 	c.keyReservationsLock.Lock()
@@ -263,7 +263,7 @@
 	if leaseID != nil {
 		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
 		if err != nil {
-			logger.Error(err)
+			logger.Error(ctx, err)
 			return err
 		}
 		delete(c.keyReservations, key)
@@ -288,7 +288,7 @@
 	if leaseID != nil {
 		_, err := c.ectdAPI.KeepAliveOnce(ctx, *leaseID)
 		if err != nil {
-			logger.Errorw("lease-may-have-expired", log.Fields{"error": err})
+			logger.Errorw(ctx, "lease-may-have-expired", log.Fields{"error": err})
 			return err
 		}
 	} else {
@@ -320,9 +320,9 @@
 
 	// Changing the log field (from channelMaps) as the underlying logger cannot format the map of channels into a
 	// json format.
-	logger.Debugw("watched-channels", log.Fields{"len": len(channelMaps)})
+	logger.Debugw(ctx, "watched-channels", log.Fields{"len": len(channelMaps)})
 	// Launch a go routine to listen for updates
-	go c.listenForKeyChange(channel, ch, cancel)
+	go c.listenForKeyChange(ctx, channel, ch, cancel)
 
 	return ch
 
@@ -369,23 +369,23 @@
 
 // CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
 // may be multiple listeners on the same key.  The previously created channel serves as a key
-func (c *EtcdClient) CloseWatch(key string, ch chan *Event) {
+func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
 	// Get the array of channels mapping
 	var watchedChannels []map[chan *Event]v3Client.Watcher
 	var ok bool
 
 	if watchedChannels, ok = c.getChannelMaps(key); !ok {
-		logger.Warnw("key-has-no-watched-channels", log.Fields{"key": key})
+		logger.Warnw(ctx, "key-has-no-watched-channels", log.Fields{"key": key})
 		return
 	}
 	// Look for the channels
 	var pos = -1
 	for i, chMap := range watchedChannels {
 		if t, ok := chMap[ch]; ok {
-			logger.Debug("channel-found")
+			logger.Debug(ctx, "channel-found")
 			// Close the etcd watcher before the client channel.  This should close the etcd channel as well
 			if err := t.Close(); err != nil {
-				logger.Errorw("watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+				logger.Errorw(ctx, "watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
 			}
 			pos = i
 			break
@@ -397,11 +397,11 @@
 	if pos >= 0 {
 		channelMaps = c.removeChannelMap(key, pos)
 	}
-	logger.Infow("watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+	logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
 }
 
-func (c *EtcdClient) listenForKeyChange(channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
-	logger.Debug("start-listening-on-channel ...")
+func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+	logger.Debug(ctx, "start-listening-on-channel ...")
 	defer cancel()
 	defer close(ch)
 	for resp := range channel {
@@ -409,7 +409,7 @@
 			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
 		}
 	}
-	logger.Debug("stop-listening-on-channel ...")
+	logger.Debug(ctx, "stop-listening-on-channel ...")
 }
 
 func getEventType(event *v3Client.Event) int {
@@ -423,9 +423,9 @@
 }
 
 // Close closes the KV store client
-func (c *EtcdClient) Close() {
+func (c *EtcdClient) Close(ctx context.Context) {
 	if err := c.ectdAPI.Close(); err != nil {
-		logger.Errorw("error-closing-client", log.Fields{"error": err})
+		logger.Errorw(ctx, "error-closing-client", log.Fields{"error": err})
 	}
 }
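
Construction takes a context in v4 as well; a sketch (the address and timeout are illustrative, and the ctx is only used for logging at construction time):

// Sketch: creating an etcd client under the v4 API.
func newKvClient(ctx context.Context) (*EtcdClient, error) {
	return NewEtcdClient(ctx, "etcd:2379", 5*time.Second, log.WarnLevel)
}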
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/kvutils.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/kvutils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore/kvutils.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/kvutils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/common.go
similarity index 82%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/common.go
index 557de3f..fdc93bd 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/common.go
@@ -16,15 +16,15 @@
 package flows
 
 import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "flowsUtils"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/flow_utils.go
similarity index 94%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/flow_utils.go
index 3139144..98fad49 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/flows/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/flows/flow_utils.go
@@ -17,6 +17,7 @@
 
 import (
 	"bytes"
+	"context"
 	"crypto/md5"
 	"encoding/binary"
 	"fmt"
@@ -25,8 +26,8 @@
 
 	"github.com/cevaris/ordered_map"
 	"github.com/gogo/protobuf/proto"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	ofp "github.com/opencord/voltha-protos/v3/go/openflow_13"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	ofp "github.com/opencord/voltha-protos/v4/go/openflow_13"
 )
 
 var (
@@ -490,6 +491,35 @@
 	return nil
 }
 
+func GetSetActionField(ctx context.Context, flow *ofp.OfpFlowStats, ofbType ofp.OxmOfbFieldTypes) (uint32, bool) {
+	if flow == nil {
+		return 0, false
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(APPLY_ACTIONS) {
+			actions := instruction.GetActions()
+			for _, action := range actions.GetActions() {
+				if action.Type == SET_FIELD {
+					setField := action.GetSetField()
+					if setField.Field.GetOfbField().Type == ofbType {
+						switch ofbType {
+						case VLAN_PCP:
+							return setField.Field.GetOfbField().GetVlanPcp(), true
+						case VLAN_VID:
+							return setField.Field.GetOfbField().GetVlanVid(), true
+						default:
+							logger.Errorw(ctx, "unsupported-ofb-field-type", log.Fields{"ofbType": ofbType})
+							return 0, false
+						}
+					}
+				}
+			}
+			return 0, false
+		}
+	}
+	return 0, false
+}
+
 func GetTunnelId(flow *ofp.OfpFlowStats) uint64 {
 	if flow == nil {
 		return 0
@@ -503,7 +533,7 @@
 }
 
 //GetMetaData - legacy get method (only want lower 32 bits)
-func GetMetaData(flow *ofp.OfpFlowStats) uint32 {
+func GetMetaData(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
 	if flow == nil {
 		return 0
 	}
@@ -512,11 +542,11 @@
 			return uint32(field.GetTableMetadata() & 0xFFFFFFFF)
 		}
 	}
-	logger.Debug("No-metadata-present")
+	logger.Debug(ctx, "No-metadata-present")
 	return 0
 }
 
-func GetMetaData64Bit(flow *ofp.OfpFlowStats) uint64 {
+func GetMetaData64Bit(ctx context.Context, flow *ofp.OfpFlowStats) uint64 {
 	if flow == nil {
 		return 0
 	}
@@ -525,12 +555,12 @@
 			return field.GetTableMetadata()
 		}
 	}
-	logger.Debug("No-metadata-present")
+	logger.Debug(ctx, "No-metadata-present")
 	return 0
 }
 
 // function returns write metadata value from write_metadata action field
-func GetMetadataFromWriteMetadataAction(flow *ofp.OfpFlowStats) uint64 {
+func GetMetadataFromWriteMetadataAction(ctx context.Context, flow *ofp.OfpFlowStats) uint64 {
 	if flow != nil {
 		for _, instruction := range flow.Instructions {
 			if instruction.Type == uint32(WRITE_METADATA) {
@@ -540,11 +570,11 @@
 			}
 		}
 	}
-	logger.Debugw("No-write-metadata-present", log.Fields{"flow": flow})
+	logger.Debugw(ctx, "No-write-metadata-present", log.Fields{"flow": flow})
 	return 0
 }
 
-func GetTechProfileIDFromWriteMetaData(metadata uint64) uint16 {
+func GetTechProfileIDFromWriteMetaData(ctx context.Context, metadata uint64) uint16 {
 	/*
 	   Write metadata instruction value (metadata) is 8 bytes:
 	   MS 2 bytes: C Tag
@@ -554,15 +584,15 @@
 	   This is set in the ONOS OltPipeline as a write metadata instruction
 	*/
 	var tpId uint16 = 0
-	logger.Debugw("Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
+	logger.Debugw(ctx, "Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
 	if metadata != 0 {
 		tpId = uint16((metadata >> 32) & 0xFFFF)
-		logger.Debugw("Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
+		logger.Debugw(ctx, "Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
 	}
 	return tpId
 }
 
-func GetEgressPortNumberFromWriteMetadata(flow *ofp.OfpFlowStats) uint32 {
+func GetEgressPortNumberFromWriteMetadata(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
 	/*
 			  Write metadata instruction value (metadata) is 8 bytes:
 		    	MS 2 bytes: C Tag
@@ -571,17 +601,17 @@
 		    	This is set in the ONOS OltPipeline as a write metadata instruction
 	*/
 	var uniPort uint32 = 0
-	md := GetMetadataFromWriteMetadataAction(flow)
-	logger.Debugw("Metadata found for egress/uni port ", log.Fields{"metadata": md})
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	logger.Debugw(ctx, "Metadata found for egress/uni port ", log.Fields{"metadata": md})
 	if md != 0 {
 		uniPort = uint32(md & 0xFFFFFFFF)
-		logger.Debugw("Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
+		logger.Debugw(ctx, "Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
 	}
 	return uniPort
 
 }
 
-func GetInnerTagFromMetaData(flow *ofp.OfpFlowStats) uint16 {
+func GetInnerTagFromMetaData(ctx context.Context, flow *ofp.OfpFlowStats) uint16 {
 	/*
 			  Write metadata instruction value (metadata) is 8 bytes:
 		    	MS 2 bytes: C Tag
@@ -590,10 +620,10 @@
 		    	This is set in the ONOS OltPipeline as a write metadata instruction
 	*/
 	var innerTag uint16 = 0
-	md := GetMetadataFromWriteMetadataAction(flow)
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
 	if md != 0 {
 		innerTag = uint16((md >> 48) & 0xFFFF)
-		logger.Debugw("Found  CVLAN from write metadate action", log.Fields{"c_vlan": innerTag})
+		logger.Debugw(ctx, "Found  CVLAN from write metadate action", log.Fields{"c_vlan": innerTag})
 	}
 	return innerTag
 }
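
The three write-metadata helpers above decode one 8-byte value. A standalone sketch of that layout, using illustrative constants that are not taken from this change:

	package main

	import "fmt"

	func main() {
		// Compose the value as the ONOS OltPipeline would:
		// MS 2 bytes: C Tag, next 2 bytes: Technology Profile ID, LS 4 bytes: port number.
		const cTag, tpID, port = uint64(100), uint64(64), uint64(0x101) // illustrative
		md := cTag<<48 | tpID<<32 | port

		fmt.Println(uint16((md >> 48) & 0xFFFF)) // inner tag (C Tag): 100
		fmt.Println(uint16((md >> 32) & 0xFFFF)) // tech profile ID: 64
		fmt.Println(uint32(md & 0xFFFFFFFF))     // egress/uni port: 257
	}
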
@@ -607,7 +637,7 @@
 		return 0
 	}
 	if md <= 0xffffffff {
-		logger.Debugw("onos-upgrade-suggested", logger.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
+		logger.Debugw(ctx, "onos-upgrade-suggested", logger.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
 		return md
 	}
 	return (md >> 32) & 0xffffffff
@@ -937,12 +967,12 @@
 }
 
 // flowStatsEntryFromFlowModMessage maps an ofp_flow_mod message to an ofp_flow_stats message
-func MeterEntryFromMeterMod(meterMod *ofp.OfpMeterMod) *ofp.OfpMeterEntry {
+func MeterEntryFromMeterMod(ctx context.Context, meterMod *ofp.OfpMeterMod) *ofp.OfpMeterEntry {
 	bandStats := make([]*ofp.OfpMeterBandStats, 0)
 	meter := &ofp.OfpMeterEntry{Config: &ofp.OfpMeterConfig{},
 		Stats: &ofp.OfpMeterStats{BandStats: bandStats}}
 	if meterMod == nil {
-		logger.Error("Invalid meter mod command")
+		logger.Error(ctx, "Invalid meter mod command")
 		return meter
 	}
 	// config init
@@ -964,7 +994,7 @@
 		bandStats = append(bandStats, band)
 	}
 	meter.Stats.BandStats = bandStats
-	logger.Debugw("Allocated meter entry", log.Fields{"meter": *meter})
+	logger.Debugw(ctx, "Allocated meter entry", log.Fields{"meter": *meter})
 	return meter
 
 }
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/client.go
similarity index 68%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/client.go
index 0d9e3a5..0337432 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/client.go
@@ -16,9 +16,9 @@
 package kafka
 
 import (
+	"context"
+	ca "github.com/opencord/voltha-protos/v4/go/inter_container"
 	"time"
-
-	ca "github.com/opencord/voltha-protos/v3/go/inter_container"
 )
 
 const (
@@ -61,15 +61,15 @@
 
 // Client represents the set of APIs a Kafka client must implement
 type Client interface {
-	Start() error
-	Stop()
-	CreateTopic(topic *Topic, numPartition int, repFactor int) error
-	DeleteTopic(topic *Topic) error
-	Subscribe(topic *Topic, kvArgs ...*KVArg) (<-chan *ca.InterContainerMessage, error)
-	UnSubscribe(topic *Topic, ch <-chan *ca.InterContainerMessage) error
-	SubscribeForMetadata(func(fromTopic string, timestamp time.Time))
-	Send(msg interface{}, topic *Topic, keys ...string) error
-	SendLiveness() error
-	EnableLivenessChannel(enable bool) chan bool
-	EnableHealthinessChannel(enable bool) chan bool
+	Start(ctx context.Context) error
+	Stop(ctx context.Context)
+	CreateTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error
+	DeleteTopic(ctx context.Context, topic *Topic) error
+	Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan *ca.InterContainerMessage, error)
+	UnSubscribe(ctx context.Context, topic *Topic, ch <-chan *ca.InterContainerMessage) error
+	SubscribeForMetadata(context.Context, func(fromTopic string, timestamp time.Time))
+	Send(ctx context.Context, msg interface{}, topic *Topic, keys ...string) error
+	SendLiveness(ctx context.Context) error
+	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
+	EnableHealthinessChannel(ctx context.Context, enable bool) chan bool
 }
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/common.go
similarity index 83%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/common.go
index 149c150..5db364d 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/common.go
@@ -16,15 +16,15 @@
 package kafka
 
 import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Setup this package so that its log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kafka"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/endpoint_manager.go
similarity index 79%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/endpoint_manager.go
index 1258382..796eb72 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/endpoint_manager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/endpoint_manager.go
@@ -21,9 +21,9 @@
 	"github.com/buraksezer/consistent"
 	"github.com/cespare/xxhash"
 	"github.com/golang/protobuf/proto"
-	"github.com/opencord/voltha-lib-go/v3/pkg/db"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	"github.com/opencord/voltha-protos/v3/go/voltha"
+	"github.com/opencord/voltha-lib-go/v4/pkg/db"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	"github.com/opencord/voltha-protos/v4/go/voltha"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 	"sync"
@@ -50,15 +50,15 @@
 
 	// GetEndpoint is called to get the endpoint to communicate with for a specific device and service type.  For
 	// now this will return the topic name
-	GetEndpoint(deviceID string, serviceType string) (Endpoint, error)
+	GetEndpoint(ctx context.Context, deviceID string, serviceType string) (Endpoint, error)
 
 	// IsDeviceOwnedByService is invoked when a specific service (service type + replicaNumber) is restarted and
 	// devices owned by that service need to be reconciled
-	IsDeviceOwnedByService(deviceID string, serviceType string, replicaNumber int32) (bool, error)
+	IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error)
 
 	// GetReplicaAssignment returns the replica number of the service that owns the deviceID.  This is used by the
 	// test only
-	GetReplicaAssignment(deviceID string, serviceType string) (ReplicaID, error)
+	GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (ReplicaID, error)
 }
 
 type service struct {
@@ -119,9 +119,9 @@
 	return newEndpointManager(backend, opts...)
 }
 
-func (ep *endpointManager) GetEndpoint(deviceID string, serviceType string) (Endpoint, error) {
-	logger.Debugw("getting-endpoint", log.Fields{"device-id": deviceID, "service": serviceType})
-	owner, err := ep.getOwner(deviceID, serviceType)
+func (ep *endpointManager) GetEndpoint(ctx context.Context, deviceID string, serviceType string) (Endpoint, error) {
+	logger.Debugw(ctx, "getting-endpoint", log.Fields{"device-id": deviceID, "service": serviceType})
+	owner, err := ep.getOwner(ctx, deviceID, serviceType)
 	if err != nil {
 		return "", err
 	}
@@ -133,13 +133,13 @@
 	if endpoint == "" {
 		return "", status.Errorf(codes.Unavailable, "endpoint-not-set-%s", serviceType)
 	}
-	logger.Debugw("returning-endpoint", log.Fields{"device-id": deviceID, "service": serviceType, "endpoint": endpoint})
+	logger.Debugw(ctx, "returning-endpoint", log.Fields{"device-id": deviceID, "service": serviceType, "endpoint": endpoint})
 	return endpoint, nil
 }
 
-func (ep *endpointManager) IsDeviceOwnedByService(deviceID string, serviceType string, replicaNumber int32) (bool, error) {
-	logger.Debugw("device-ownership", log.Fields{"device-id": deviceID, "service": serviceType, "replica-number": replicaNumber})
-	owner, err := ep.getOwner(deviceID, serviceType)
+func (ep *endpointManager) IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error) {
+	logger.Debugw(ctx, "device-ownership", log.Fields{"device-id": deviceID, "service": serviceType, "replica-number": replicaNumber})
+	owner, err := ep.getOwner(ctx, deviceID, serviceType)
 	if err != nil {
 		return false, nil
 	}
@@ -150,8 +150,8 @@
 	return m.getReplica() == ReplicaID(replicaNumber), nil
 }
 
-func (ep *endpointManager) GetReplicaAssignment(deviceID string, serviceType string) (ReplicaID, error) {
-	owner, err := ep.getOwner(deviceID, serviceType)
+func (ep *endpointManager) GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (ReplicaID, error) {
+	owner, err := ep.getOwner(ctx, deviceID, serviceType)
 	if err != nil {
 		return 0, nil
 	}
@@ -162,8 +162,8 @@
 	return m.getReplica(), nil
 }
 
-func (ep *endpointManager) getOwner(deviceID string, serviceType string) (consistent.Member, error) {
-	serv, dType, err := ep.getServiceAndDeviceType(serviceType)
+func (ep *endpointManager) getOwner(ctx context.Context, deviceID string, serviceType string) (consistent.Member, error) {
+	serv, dType, err := ep.getServiceAndDeviceType(ctx, serviceType)
 	if err != nil {
 		return nil, err
 	}
@@ -171,7 +171,7 @@
 	return serv.consistentRing.LocateKey(key), nil
 }
 
-func (ep *endpointManager) getServiceAndDeviceType(serviceType string) (*service, string, error) {
+func (ep *endpointManager) getServiceAndDeviceType(ctx context.Context, serviceType string) (*service, string, error) {
 	// Check whether service exist
 	ep.servicesLock.RLock()
 	serv, serviceExist := ep.services[serviceType]
@@ -179,7 +179,7 @@
 
 	// Load the service and device types if needed
 	if !serviceExist || serv == nil || int(serv.totalReplicas) != len(serv.consistentRing.GetMembers()) {
-		if err := ep.loadServices(); err != nil {
+		if err := ep.loadServices(ctx); err != nil {
 			return nil, "", err
 		}
 
@@ -214,7 +214,7 @@
 // loadServices loads the services (adapters) and device types into memory. Because the data set is small and
 // its format in the dB is binary protobuf, it is better to reload all of it when an inconsistency is detected
 // than to watch for updates in the dB and act on them.
-func (ep *endpointManager) loadServices() error {
+func (ep *endpointManager) loadServices(ctx context.Context) error {
 	ep.servicesLock.Lock()
 	defer ep.servicesLock.Unlock()
 	ep.deviceTypeServiceMapLock.Lock()
@@ -227,7 +227,7 @@
 	ep.deviceTypeServiceMap = make(map[string]string)
 
 	// Load the adapters
-	blobs, err := ep.backend.List(context.Background(), "adapters")
+	blobs, err := ep.backend.List(log.WithSpanFromContext(context.Background(), ctx), "adapters")
 	if err != nil {
 		return err
 	}
@@ -257,7 +257,7 @@
 		}
 	}
 	// Load the device types
-	blobs, err = ep.backend.List(context.Background(), "device_types")
+	blobs, err = ep.backend.List(log.WithSpanFromContext(context.Background(), ctx), "device_types")
 	if err != nil {
 		return err
 	}
@@ -276,13 +276,13 @@
 	if logger.V(log.DebugLevel) {
 		for key, val := range ep.services {
 			members := val.consistentRing.GetMembers()
-			logger.Debugw("service", log.Fields{"service": key, "expected-replica": val.totalReplicas, "replicas": len(val.consistentRing.GetMembers())})
+			logger.Debugw(ctx, "service", log.Fields{"service": key, "expected-replica": val.totalReplicas, "replicas": len(val.consistentRing.GetMembers())})
 			for _, m := range members {
 				n := m.(Member)
-				logger.Debugw("service-loaded", log.Fields{"serviceId": n.getID(), "serviceType": n.getServiceType(), "replica": n.getReplica(), "endpoint": n.getEndPoint()})
+				logger.Debugw(ctx, "service-loaded", log.Fields{"serviceId": n.getID(), "serviceType": n.getServiceType(), "replica": n.getReplica(), "endpoint": n.getEndPoint()})
 			}
 		}
-		logger.Debugw("device-types-loaded", log.Fields{"device-types": ep.deviceTypeServiceMap})
+		logger.Debugw(ctx, "device-types-loaded", log.Fields{"device-types": ep.deviceTypeServiceMap})
 	}
 	return nil
 }
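
With the context-aware signatures above, a lookup carries the caller's span down to the KV-store reads in loadServices. A minimal sketch, assuming the EndpointManager interface these methods belong to, with illustrative device and service names:

	// Sketch only: resolve the topic owning a device, then verify ownership.
	func resolve(ctx context.Context, em EndpointManager) error {
		endpoint, err := em.GetEndpoint(ctx, "device-1234", "openolt")
		if err != nil {
			return err
		}
		owned, err := em.IsDeviceOwnedByService(ctx, "device-1234", "openolt", 1)
		if err != nil {
			return err
		}
		logger.Debugw(ctx, "resolved", log.Fields{"endpoint": endpoint, "owned": owned})
		return nil
	}
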
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/kafka_inter_container_library.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/kafka_inter_container_library.go
new file mode 100644
index 0000000..3af35d7
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/kafka_inter_container_library.go
@@ -0,0 +1,1085 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kafka
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/google/uuid"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
+	"github.com/opentracing/opentracing-go"
+)
+
+const (
+	DefaultMaxRetries     = 3
+	DefaultRequestTimeout = 60000 // 60000 milliseconds - to handle a wider latency range
+)
+
+const (
+	TransactionKey = "transactionID"
+	FromTopic      = "fromTopic"
+)
+
+var ErrorTransactionNotAcquired = errors.New("transaction-not-acquired")
+var ErrorTransactionInvalidId = errors.New("transaction-invalid-id")
+
+// requestHandlerChannel represents an interface associated with a channel.  Whenever an event is
+// obtained from that channel, this interface is invoked.   This is used to handle
+// async requests into the Core via the kafka messaging bus
+type requestHandlerChannel struct {
+	requesthandlerInterface interface{}
+	ch                      <-chan *ic.InterContainerMessage
+}
+
+// transactionChannel represents a combination of a topic and a channel onto which a response received
+// on the kafka bus will be sent
+type transactionChannel struct {
+	topic *Topic
+	ch    chan *ic.InterContainerMessage
+}
+
+type InterContainerProxy interface {
+	Start(ctx context.Context) error
+	Stop(ctx context.Context)
+	GetDefaultTopic() *Topic
+	InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any)
+	InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse
+	SubscribeWithRequestHandlerInterface(ctx context.Context, topic Topic, handler interface{}) error
+	SubscribeWithDefaultRequestHandler(ctx context.Context, topic Topic, initialOffset int64) error
+	UnSubscribeFromRequestHandler(ctx context.Context, topic Topic) error
+	DeleteTopic(ctx context.Context, topic Topic) error
+	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
+	SendLiveness(ctx context.Context) error
+}
+
+// interContainerProxy represents the messaging proxy
+type interContainerProxy struct {
+	kafkaAddress                   string
+	defaultTopic                   *Topic
+	defaultRequestHandlerInterface interface{}
+	kafkaClient                    Client
+	doneCh                         chan struct{}
+	doneOnce                       sync.Once
+
+	// This map is used to map a topic to an interface and channel.   When a request is received
+	// on that channel (registered to the topic) then that interface is invoked.
+	topicToRequestHandlerChannelMap   map[string]*requestHandlerChannel
+	lockTopicRequestHandlerChannelMap sync.RWMutex
+
+	// This map is used to map a channel to a response topic.   This channel handles all responses on that
+	// channel for that topic and forward them to the appropriate consumers channel, using the
+	// transactionIdToChannelMap.
+	topicToResponseChannelMap   map[string]<-chan *ic.InterContainerMessage
+	lockTopicResponseChannelMap sync.RWMutex
+
+	// This map is used to map a transaction to a consumers channel.  This is used whenever a request has been
+	// sent out and we are waiting for a response.
+	transactionIdToChannelMap     map[string]*transactionChannel
+	lockTransactionIdToChannelMap sync.RWMutex
+}
+
+type InterContainerProxyOption func(*interContainerProxy)
+
+func InterContainerAddress(address string) InterContainerProxyOption {
+	return func(args *interContainerProxy) {
+		args.kafkaAddress = address
+	}
+}
+
+func DefaultTopic(topic *Topic) InterContainerProxyOption {
+	return func(args *interContainerProxy) {
+		args.defaultTopic = topic
+	}
+}
+
+func RequestHandlerInterface(handler interface{}) InterContainerProxyOption {
+	return func(args *interContainerProxy) {
+		args.defaultRequestHandlerInterface = handler
+	}
+}
+
+func MsgClient(client Client) InterContainerProxyOption {
+	return func(args *interContainerProxy) {
+		args.kafkaClient = client
+	}
+}
+
+func newInterContainerProxy(opts ...InterContainerProxyOption) *interContainerProxy {
+	proxy := &interContainerProxy{
+		kafkaAddress: DefaultKafkaAddress,
+		doneCh:       make(chan struct{}),
+	}
+
+	for _, option := range opts {
+		option(proxy)
+	}
+
+	return proxy
+}
+
+func NewInterContainerProxy(opts ...InterContainerProxyOption) InterContainerProxy {
+	return newInterContainerProxy(opts...)
+}
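
The functional options compose in the usual way; a short sketch of building and starting a proxy (the address and topic name are illustrative):

	// Sketch only: construct an InterContainerProxy from the options above.
	func newProxy(ctx context.Context, client Client) (InterContainerProxy, error) {
		kp := NewInterContainerProxy(
			InterContainerAddress("127.0.0.1:9092"),  // illustrative address
			DefaultTopic(&Topic{Name: "my-adapter"}), // illustrative topic
			MsgClient(client),
		)
		if err := kp.Start(ctx); err != nil {
			return nil, err
		}
		return kp, nil
	}
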
+
+func (kp *interContainerProxy) Start(ctx context.Context) error {
+	logger.Info(ctx, "Starting-Proxy")
+
+	// Kafka MsgClient should already have been created.  If not, output fatal error
+	if kp.kafkaClient == nil {
+		logger.Fatal(ctx, "kafka-client-not-set")
+	}
+
+	// Start the kafka client
+	if err := kp.kafkaClient.Start(ctx); err != nil {
+		logger.Errorw(ctx, "Cannot-create-kafka-proxy", log.Fields{"error": err})
+		return err
+	}
+
+	// Create the topic to response channel map
+	kp.topicToResponseChannelMap = make(map[string]<-chan *ic.InterContainerMessage)
+	//
+	// Create the transactionId to Channel Map
+	kp.transactionIdToChannelMap = make(map[string]*transactionChannel)
+
+	// Create the topic to request channel map
+	kp.topicToRequestHandlerChannelMap = make(map[string]*requestHandlerChannel)
+
+	return nil
+}
+
+func (kp *interContainerProxy) Stop(ctx context.Context) {
+	logger.Info(ctx, "stopping-intercontainer-proxy")
+	kp.doneOnce.Do(func() { close(kp.doneCh) })
+	// TODO : Perform cleanup
+	kp.kafkaClient.Stop(ctx)
+	err := kp.deleteAllTopicRequestHandlerChannelMap(ctx)
+	if err != nil {
+		logger.Errorw(ctx, "failed-delete-all-topic-request-handler-channel-map", log.Fields{"error": err})
+	}
+	err = kp.deleteAllTopicResponseChannelMap(ctx)
+	if err != nil {
+		logger.Errorw(ctx, "failed-delete-all-topic-response-channel-map", log.Fields{"error": err})
+	}
+	kp.deleteAllTransactionIdToChannelMap(ctx)
+}
+
+func (kp *interContainerProxy) GetDefaultTopic() *Topic {
+	return kp.defaultTopic
+}
+
+// InvokeAsyncRPC is used to make an RPC request asynchronously
+func (kp *interContainerProxy) InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
+	waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse {
+
+	logger.Debugw(ctx, "InvokeAsyncRPC", log.Fields{"rpc": rpc, "key": key})
+
+	spanArg, span, ctx := kp.embedSpanAsArg(ctx, rpc, !waitForResponse)
+	if spanArg != nil {
+		kvArgs = append(kvArgs, &spanArg[0])
+	}
+	defer span.Finish()
+
+	// If a replyToTopic is provided then we use it, otherwise just use the default topic.  The replyToTopic is
+	// typically the device ID.
+	responseTopic := replyToTopic
+	if responseTopic == nil {
+		responseTopic = kp.GetDefaultTopic()
+	}
+
+	chnl := make(chan *RpcResponse)
+
+	go func() {
+
+		// once we're done,
+		// close the response channel
+		defer close(chnl)
+
+		var err error
+		var protoRequest *ic.InterContainerMessage
+
+		// Encode the request
+		protoRequest, err = encodeRequest(ctx, rpc, toTopic, responseTopic, key, kvArgs...)
+		if err != nil {
+			logger.Warnw(ctx, "cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+			log.MarkSpanError(ctx, errors.New("cannot-format-request"))
+			chnl <- NewResponse(RpcFormattingError, err, nil)
+			return
+		}
+
+		// Subscribe for response, if needed, before sending request
+		var ch <-chan *ic.InterContainerMessage
+		if ch, err = kp.subscribeForResponse(ctx, *responseTopic, protoRequest.Header.Id); err != nil {
+			logger.Errorw(ctx, "failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+			log.MarkSpanError(ctx, errors.New("failed-to-subscribe-for-response"))
+			chnl <- NewResponse(RpcTransportError, err, nil)
+			return
+		}
+
+		// Send request - if the topic is formatted with a device Id then we will send the request using a
+		// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
+		// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
+		logger.Debugw(ctx, "sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+
+		// if the message is not sent on kafka, publish an event and close the channel
+		if err = kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err != nil {
+			chnl <- NewResponse(RpcTransportError, err, nil)
+			return
+		}
+
+		// if the client is not waiting for a response send the ack and close the channel
+		chnl <- NewResponse(RpcSent, nil, nil)
+		if !waitForResponse {
+			return
+		}
+
+		defer func() {
+			// Remove the subscription for a response on return
+			if err := kp.unSubscribeForResponse(ctx, protoRequest.Header.Id); err != nil {
+				logger.Warnw(ctx, "invoke-async-rpc-unsubscriber-for-response-failed", log.Fields{"err": err})
+			}
+		}()
+
+		// Wait for response as well as timeout or cancellation
+		select {
+		case msg, ok := <-ch:
+			if !ok {
+				logger.Warnw(ctx, "channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				log.MarkSpanError(ctx, errors.New("channel-closed"))
+				chnl <- NewResponse(RpcTransportError, status.Error(codes.Aborted, "channel closed"), nil)
+				return
+			}
+			logger.Debugw(ctx, "received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			if responseBody, err := decodeResponse(ctx, msg); err != nil {
+				chnl <- NewResponse(RpcReply, err, nil)
+			} else {
+				if responseBody.Success {
+					chnl <- NewResponse(RpcReply, nil, responseBody.Result)
+				} else {
+					// response body contains an error
+					unpackErr := &ic.Error{}
+					if err := ptypes.UnmarshalAny(responseBody.Result, unpackErr); err != nil {
+						chnl <- NewResponse(RpcReply, err, nil)
+					} else {
+						chnl <- NewResponse(RpcReply, status.Error(codes.Internal, unpackErr.Reason), nil)
+					}
+				}
+			}
+		case <-ctx.Done():
+			logger.Errorw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			log.MarkSpanError(ctx, errors.New("context-cancelled"))
+			err := status.Error(codes.DeadlineExceeded, ctx.Err().Error())
+			chnl <- NewResponse(RpcTimeout, err, nil)
+		case <-kp.doneCh:
+			chnl <- NewResponse(RpcSystemClosing, nil, nil)
+			logger.Warnw(ctx, "received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+		}
+	}()
+	return chnl
+}
+
+// Method to extract the OpenTracing span from the context and serialize it for transport over Kafka, embedded as an additional argument.
+// The additional argument is injected using "span" as the key and the span marshalled into a byte slice as the value
+//
+// The span name is automatically constructed using the RPC name with following convention (<rpc-name> represents name of invoked method):
+// - RPC invoked in Sync manner (WaitForResponse=true) : kafka-rpc-<rpc-name>
+// - RPC invoked in Async manner (WaitForResponse=false) : kafka-async-rpc-<rpc-name>
+// - Inter Adapter RPC invoked in Sync manner (WaitForResponse=true) : kafka-inter-adapter-rpc-<rpc-name>
+// - Inter Adapter RPC invoked in Async manner (WaitForResponse=false) : kafka-inter-adapter-async-rpc-<rpc-name>
+func (kp *interContainerProxy) embedSpanAsArg(ctx context.Context, rpc string, isAsync bool) ([]KVArg, opentracing.Span, context.Context) {
+	var err error
+	var newCtx context.Context
+	var spanToInject opentracing.Span
+
+	var spanName strings.Builder
+	spanName.WriteString("kafka-")
+
+	// In case of inter adapter message, use Msg Type for constructing RPC name
+	if rpc == "process_inter_adapter_message" {
+		if msgType, ok := ctx.Value("inter-adapter-msg-type").(ic.InterAdapterMessageType_Types); ok {
+			spanName.WriteString("inter-adapter-")
+			rpc = msgType.String()
+		}
+	}
+
+	if isAsync {
+		spanName.WriteString("async-rpc-")
+	} else {
+		spanName.WriteString("rpc-")
+	}
+	spanName.WriteString(rpc)
+
+	if isAsync {
+		spanToInject, newCtx = log.CreateAsyncSpan(ctx, spanName.String())
+	} else {
+		spanToInject, newCtx = log.CreateChildSpan(ctx, spanName.String())
+	}
+
+	spanToInject.SetBaggageItem("rpc-span-name", spanName.String())
+
+	textMapCarrier := opentracing.TextMapCarrier(make(map[string]string))
+	if err = opentracing.GlobalTracer().Inject(spanToInject.Context(), opentracing.TextMap, textMapCarrier); err != nil {
+		logger.Warnw(ctx, "unable-to-serialize-span-to-textmap", log.Fields{"span": spanToInject, "error": err})
+		return nil, spanToInject, newCtx
+	}
+
+	var textMapJson []byte
+	if textMapJson, err = json.Marshal(textMapCarrier); err != nil {
+		logger.Warnw(ctx, "unable-to-marshal-textmap-to-json-string", log.Fields{"textMap": textMapCarrier, "error": err})
+		return nil, spanToInject, newCtx
+	}
+
+	spanArg := make([]KVArg, 1)
+	spanArg[0] = KVArg{Key: "span", Value: &ic.StrType{Val: string(textMapJson)}}
+	return spanArg, spanToInject, newCtx
+}
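
Concretely, a synchronous "get_device" request yields a span named kafka-rpc-get_device, while a process_inter_adapter_message call whose context carries an "inter-adapter-msg-type" of, say, OMCI_REQUEST (an illustrative value) yields kafka-inter-adapter-rpc-OMCI_REQUEST, or kafka-inter-adapter-async-rpc-OMCI_REQUEST when no response is awaited.
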
+
+// InvokeRPC is used to send a request to a given topic
+func (kp *interContainerProxy) InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
+	waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any) {
+
+	spanArg, span, ctx := kp.embedSpanAsArg(ctx, rpc, false)
+	if spanArg != nil {
+		kvArgs = append(kvArgs, &spanArg[0])
+	}
+	defer span.Finish()
+
+	// If a replyToTopic is provided then we use it, otherwise just use the default topic.  The replyToTopic is
+	// typically the device ID.
+	responseTopic := replyToTopic
+	if responseTopic == nil {
+		responseTopic = kp.defaultTopic
+	}
+
+	// Encode the request
+	protoRequest, err := encodeRequest(ctx, rpc, toTopic, responseTopic, key, kvArgs...)
+	if err != nil {
+		logger.Warnw(ctx, "cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+		log.MarkSpanError(ctx, errors.New("cannot-format-request"))
+		return false, nil
+	}
+
+	// Subscribe for response, if needed, before sending request
+	var ch <-chan *ic.InterContainerMessage
+	if waitForResponse {
+		var err error
+		if ch, err = kp.subscribeForResponse(ctx, *responseTopic, protoRequest.Header.Id); err != nil {
+			logger.Errorw(ctx, "failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+		}
+	}
+
+	// Send request - if the topic is formatted with a device Id then we will send the request using a
+	// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
+	// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
+	//key := GetDeviceIdFromTopic(*toTopic)
+	logger.Debugw(ctx, "sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+	go func() {
+		if err := kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err != nil {
+			log.MarkSpanError(ctx, errors.New("send-failed"))
+			logger.Errorw(ctx, "send-failed", log.Fields{
+				"topic": toTopic,
+				"key":   key,
+				"error": err})
+		}
+	}()
+
+	if waitForResponse {
+		// Create a child context based on the parent context, if any
+		var cancel context.CancelFunc
+		childCtx := context.Background()
+		if ctx == nil {
+			ctx, cancel = context.WithTimeout(context.Background(), DefaultRequestTimeout*time.Millisecond)
+		} else {
+			childCtx, cancel = context.WithTimeout(ctx, DefaultRequestTimeout*time.Millisecond)
+		}
+		defer cancel()
+
+		// Wait for response as well as timeout or cancellation
+		// Remove the subscription for a response on return
+		defer func() {
+			if err := kp.unSubscribeForResponse(ctx, protoRequest.Header.Id); err != nil {
+				logger.Errorw(ctx, "response-unsubscribe-failed", log.Fields{
+					"id":    protoRequest.Header.Id,
+					"error": err})
+			}
+		}()
+		select {
+		case msg, ok := <-ch:
+			if !ok {
+				logger.Warnw(ctx, "channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				log.MarkSpanError(ctx, errors.New("channel-closed"))
+				protoError := &ic.Error{Reason: "channel-closed"}
+				var marshalledArg *any.Any
+				if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+					return false, nil // Should never happen
+				}
+				return false, marshalledArg
+			}
+			logger.Debugw(ctx, "received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			var responseBody *ic.InterContainerResponseBody
+			var err error
+			if responseBody, err = decodeResponse(ctx, msg); err != nil {
+				logger.Errorw(ctx, "decode-response-error", log.Fields{"error": err})
+				return false, nil
+			}
+			return responseBody.Success, responseBody.Result
+		case <-ctx.Done():
+			logger.Debugw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			log.MarkSpanError(ctx, errors.New("context-cancelled"))
+			//	 pack the error as proto any type
+			protoError := &ic.Error{Reason: ctx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
+
+			var marshalledArg *any.Any
+			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+				return false, nil // Should never happen
+			}
+			return false, marshalledArg
+		case <-childCtx.Done():
+			logger.Debugw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
+			log.MarkSpanError(ctx, errors.New("context-cancelled"))
+			//	 pack the error as proto any type
+			protoError := &ic.Error{Reason: childCtx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
+
+			var marshalledArg *any.Any
+			if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
+				return false, nil // Should never happen
+			}
+			return false, marshalledArg
+		case <-kp.doneCh:
+			logger.Infow(ctx, "received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+			return true, nil
+		}
+	}
+	return true, nil
+}
+
+// SubscribeWithRequestHandlerInterface allows a caller to assign a target object to be invoked automatically
+// when a message is received on a given topic
+func (kp *interContainerProxy) SubscribeWithRequestHandlerInterface(ctx context.Context, topic Topic, handler interface{}) error {
+
+	// Subscribe to receive messages for that topic
+	var ch <-chan *ic.InterContainerMessage
+	var err error
+	if ch, err = kp.kafkaClient.Subscribe(ctx, &topic); err != nil {
+		//if ch, err = kp.Subscribe(topic); err != nil {
+		logger.Errorw(ctx, "failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+		return err
+	}
+
+	kp.defaultRequestHandlerInterface = handler
+	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: handler, ch: ch})
+	// Launch a go routine to receive and process kafka messages
+	go kp.waitForMessages(ctx, ch, topic, handler)
+
+	return nil
+}
+
+// SubscribeWithDefaultRequestHandler allows a caller to add a topic to an existing target object to be invoked automatically
+// when a message is received on a given topic.  So far there is only 1 target registered per microservice
+func (kp *interContainerProxy) SubscribeWithDefaultRequestHandler(ctx context.Context, topic Topic, initialOffset int64) error {
+	// Subscribe to receive messages for that topic
+	var ch <-chan *ic.InterContainerMessage
+	var err error
+	if ch, err = kp.kafkaClient.Subscribe(ctx, &topic, &KVArg{Key: Offset, Value: initialOffset}); err != nil {
+		logger.Errorw(ctx, "failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+		return err
+	}
+	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: kp.defaultRequestHandlerInterface, ch: ch})
+
+	// Launch a go routine to receive and process kafka messages
+	go kp.waitForMessages(ctx, ch, topic, kp.defaultRequestHandlerInterface)
+
+	return nil
+}
+
+func (kp *interContainerProxy) UnSubscribeFromRequestHandler(ctx context.Context, topic Topic) error {
+	return kp.deleteFromTopicRequestHandlerChannelMap(ctx, topic.Name)
+}
+
+func (kp *interContainerProxy) deleteFromTopicResponseChannelMap(ctx context.Context, topic string) error {
+	kp.lockTopicResponseChannelMap.Lock()
+	defer kp.lockTopicResponseChannelMap.Unlock()
+	if _, exist := kp.topicToResponseChannelMap[topic]; exist {
+		// Unsubscribe from this topic first - this will close the subscribed channel
+		var err error
+		if err = kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic})
+		}
+		delete(kp.topicToResponseChannelMap, topic)
+		return err
+	} else {
+		return fmt.Errorf("%s-Topic-not-found", topic)
+	}
+}
+
+// nolint: unused
+func (kp *interContainerProxy) deleteAllTopicResponseChannelMap(ctx context.Context) error {
+	logger.Debug(ctx, "delete-all-topic-response-channel")
+	kp.lockTopicResponseChannelMap.Lock()
+	defer kp.lockTopicResponseChannelMap.Unlock()
+	var unsubscribeFailTopics []string
+	for topic := range kp.topicToResponseChannelMap {
+		// Unsubscribe from this topic first - this will close the subscribed channel
+		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
+			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			// Do not return. Continue to try to unsubscribe to other topics.
+		} else {
+			// Only delete from channel map if successfully unsubscribed.
+			delete(kp.topicToResponseChannelMap, topic)
+		}
+	}
+	if len(unsubscribeFailTopics) > 0 {
+		return fmt.Errorf("unsubscribe-errors: %v", unsubscribeFailTopics)
+	}
+	return nil
+}
+
+func (kp *interContainerProxy) addToTopicRequestHandlerChannelMap(topic string, arg *requestHandlerChannel) {
+	kp.lockTopicRequestHandlerChannelMap.Lock()
+	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
+	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; !exist {
+		kp.topicToRequestHandlerChannelMap[topic] = arg
+	}
+}
+
+func (kp *interContainerProxy) deleteFromTopicRequestHandlerChannelMap(ctx context.Context, topic string) error {
+	kp.lockTopicRequestHandlerChannelMap.Lock()
+	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
+	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; exist {
+		// Close the kafka client first by unsubscribing from this topic
+		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+			return err
+		}
+		delete(kp.topicToRequestHandlerChannelMap, topic)
+		return nil
+	} else {
+		return fmt.Errorf("%s-Topic-not-found", topic)
+	}
+}
+
+// nolint: unused
+func (kp *interContainerProxy) deleteAllTopicRequestHandlerChannelMap(ctx context.Context) error {
+	logger.Debug(ctx, "delete-all-topic-request-channel")
+	kp.lockTopicRequestHandlerChannelMap.Lock()
+	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
+	var unsubscribeFailTopics []string
+	for topic := range kp.topicToRequestHandlerChannelMap {
+		// Close the kafka client first by unsubscribing from this topic
+		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
+			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			// Do not return. Continue to try to unsubscribe to other topics.
+		} else {
+			// Only delete from channel map if successfully unsubscribed.
+			delete(kp.topicToRequestHandlerChannelMap, topic)
+		}
+	}
+	if len(unsubscribeFailTopics) > 0 {
+		return fmt.Errorf("unsubscribe-errors: %v", unsubscribeFailTopics)
+	}
+	return nil
+}
+
+func (kp *interContainerProxy) addToTransactionIdToChannelMap(id string, topic *Topic, arg chan *ic.InterContainerMessage) {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	if _, exist := kp.transactionIdToChannelMap[id]; !exist {
+		kp.transactionIdToChannelMap[id] = &transactionChannel{topic: topic, ch: arg}
+	}
+}
+
+func (kp *interContainerProxy) deleteFromTransactionIdToChannelMap(id string) {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	if transChannel, exist := kp.transactionIdToChannelMap[id]; exist {
+		// Close the channel first
+		close(transChannel.ch)
+		delete(kp.transactionIdToChannelMap, id)
+	}
+}
+
+func (kp *interContainerProxy) deleteTopicTransactionIdToChannelMap(id string) {
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	for key, value := range kp.transactionIdToChannelMap {
+		if value.topic.Name == id {
+			close(value.ch)
+			delete(kp.transactionIdToChannelMap, key)
+		}
+	}
+}
+
+// nolint: unused
+func (kp *interContainerProxy) deleteAllTransactionIdToChannelMap(ctx context.Context) {
+	logger.Debug(ctx, "delete-all-transaction-id-channel-map")
+	kp.lockTransactionIdToChannelMap.Lock()
+	defer kp.lockTransactionIdToChannelMap.Unlock()
+	for key, value := range kp.transactionIdToChannelMap {
+		close(value.ch)
+		delete(kp.transactionIdToChannelMap, key)
+	}
+}
+
+func (kp *interContainerProxy) DeleteTopic(ctx context.Context, topic Topic) error {
+	// If we have any consumers on that topic we need to close them
+	if err := kp.deleteFromTopicResponseChannelMap(ctx, topic.Name); err != nil {
+		logger.Errorw(ctx, "delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
+	}
+	if err := kp.deleteFromTopicRequestHandlerChannelMap(ctx, topic.Name); err != nil {
+		logger.Errorw(ctx, "delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
+	}
+	kp.deleteTopicTransactionIdToChannelMap(topic.Name)
+
+	return kp.kafkaClient.DeleteTopic(ctx, &topic)
+}
+
+func encodeReturnedValue(ctx context.Context, returnedVal interface{}) (*any.Any, error) {
+	// Encode the response argument - needs to be a proto message
+	if returnedVal == nil {
+		return nil, nil
+	}
+	protoValue, ok := returnedVal.(proto.Message)
+	if !ok {
+		logger.Warnw(ctx, "response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
+		err := errors.New("response-value-not-proto-message")
+		return nil, err
+	}
+
+	// Marshal the returned value, if any
+	var marshalledReturnedVal *any.Any
+	var err error
+	if marshalledReturnedVal, err = ptypes.MarshalAny(protoValue); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-returned-val", log.Fields{"error": err})
+		return nil, err
+	}
+	return marshalledReturnedVal, nil
+}
+
+func encodeDefaultFailedResponse(ctx context.Context, request *ic.InterContainerMessage) *ic.InterContainerMessage {
+	responseHeader := &ic.Header{
+		Id:        request.Header.Id,
+		Type:      ic.MessageType_RESPONSE,
+		FromTopic: request.Header.ToTopic,
+		ToTopic:   request.Header.FromTopic,
+		Timestamp: ptypes.TimestampNow(),
+	}
+	responseBody := &ic.InterContainerResponseBody{
+		Success: false,
+		Result:  nil,
+	}
+	var marshalledResponseBody *any.Any
+	var err error
+	// Error should never happen here
+	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-failed-response-body", log.Fields{"error": err})
+	}
+
+	return &ic.InterContainerMessage{
+		Header: responseHeader,
+		Body:   marshalledResponseBody,
+	}
+
+}
+
+//encodeResponse encodes a response to send over kafka and returns an InterContainerMessage on success
+//or an error on failure
+func encodeResponse(ctx context.Context, request *ic.InterContainerMessage, success bool, returnedValues ...interface{}) (*ic.InterContainerMessage, error) {
+	//logger.Debugw(ctx, "encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
+	responseHeader := &ic.Header{
+		Id:        request.Header.Id,
+		Type:      ic.MessageType_RESPONSE,
+		FromTopic: request.Header.ToTopic,
+		ToTopic:   request.Header.FromTopic,
+		KeyTopic:  request.Header.KeyTopic,
+		Timestamp: ptypes.TimestampNow(),
+	}
+
+	// Go over all returned values
+	var marshalledReturnedVal *any.Any
+	var err error
+
+	// for now we support only 1 returned value - (excluding the error)
+	if len(returnedValues) > 0 {
+		if marshalledReturnedVal, err = encodeReturnedValue(ctx, returnedValues[0]); err != nil {
+			logger.Warnw(ctx, "cannot-marshal-response-body", log.Fields{"error": err})
+		}
+	}
+
+	responseBody := &ic.InterContainerResponseBody{
+		Success: success,
+		Result:  marshalledReturnedVal,
+	}
+
+	// Marshal the response body
+	var marshalledResponseBody *any.Any
+	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-response-body", log.Fields{"error": err})
+		return nil, err
+	}
+
+	return &ic.InterContainerMessage{
+		Header: responseHeader,
+		Body:   marshalledResponseBody,
+	}, nil
+}
+
+func CallFuncByName(ctx context.Context, myClass interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) {
+	myClassValue := reflect.ValueOf(myClass)
+	// Capitalize the first letter of funcName to work around the requirement that
+	// a method must be exported (start with a capital letter) to be invoked from another package
+	funcName = strings.Title(funcName)
+	m := myClassValue.MethodByName(funcName)
+	if !m.IsValid() {
+		return make([]reflect.Value, 0), fmt.Errorf("method-not-found \"%s\"", funcName)
+	}
+	in := make([]reflect.Value, len(params)+1)
+	in[0] = reflect.ValueOf(ctx)
+	for i, param := range params {
+		in[i+1] = reflect.ValueOf(param)
+	}
+	out = m.Call(in)
+	return
+}
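
CallFuncByName is the dispatch point for incoming requests: the rpc name from the wire is passed through strings.Title (underscores are not word separators, so only the first letter changes) and looked up as a method taking the context first. A minimal sketch with a hypothetical handler type:

	// Sketch only: a hypothetical request-handler target for CallFuncByName.
	type demoHandler struct{}

	// "get_health" on the wire resolves to the method name "Get_health".
	func (h *demoHandler) Get_health(ctx context.Context, name string) (string, error) {
		return "ok: " + name, nil
	}

	func dispatchDemo(ctx context.Context) {
		out, err := CallFuncByName(ctx, &demoHandler{}, "get_health", "core-1")
		if err == nil {
			_ = out[0].Interface().(string) // "ok: core-1"; out[1] holds the returned error value
		}
	}
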
+
+func (kp *interContainerProxy) addTransactionId(ctx context.Context, transactionId string, currentArgs []*ic.Argument) []*ic.Argument {
+	arg := &KVArg{
+		Key:   TransactionKey,
+		Value: &ic.StrType{Val: transactionId},
+	}
+
+	var marshalledArg *any.Any
+	var err error
+	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: transactionId}); err != nil {
+		logger.Warnw(ctx, "cannot-add-transactionId", log.Fields{"error": err})
+		return currentArgs
+	}
+	protoArg := &ic.Argument{
+		Key:   arg.Key,
+		Value: marshalledArg,
+	}
+	return append(currentArgs, protoArg)
+}
+
+func (kp *interContainerProxy) addFromTopic(ctx context.Context, fromTopic string, currentArgs []*ic.Argument) []*ic.Argument {
+	var marshalledArg *any.Any
+	var err error
+	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: fromTopic}); err != nil {
+		logger.Warnw(ctx, "cannot-add-transactionId", log.Fields{"error": err})
+		return currentArgs
+	}
+	protoArg := &ic.Argument{
+		Key:   FromTopic,
+		Value: marshalledArg,
+	}
+	return append(currentArgs, protoArg)
+}
+
+// Method to extract the Span embedded in a Kafka RPC request on the receiver side. If a span is found embedded in the KV args (with key "span"),
+// it is de-serialized and injected into the Context to be carried forward by the RPC request processor thread.
+// If no span is found, one is still created with the name "kafka-rpc-<rpc-name>" to enrich the Context for RPC calls coming
+// from components that do not yet send a span (e.g. the openonu adapter)
+func (kp *interContainerProxy) enrichContextWithSpan(ctx context.Context, rpcName string, args []*ic.Argument) (opentracing.Span, context.Context) {
+
+	for _, arg := range args {
+		if arg.Key == "span" {
+			var err error
+			var textMapString ic.StrType
+			if err = ptypes.UnmarshalAny(arg.Value, &textMapString); err != nil {
+				logger.Warnw(ctx, "unable-to-unmarshal-kvarg-to-textmap-string", log.Fields{"value": arg.Value})
+				break
+			}
+
+			spanTextMap := make(map[string]string)
+			if err = json.Unmarshal([]byte(textMapString.Val), &spanTextMap); err != nil {
+				logger.Warnw(ctx, "unable-to-unmarshal-textmap-from-json-string", log.Fields{"textMapString": textMapString, "error": err})
+				break
+			}
+
+			var spanContext opentracing.SpanContext
+			if spanContext, err = opentracing.GlobalTracer().Extract(opentracing.TextMap, opentracing.TextMapCarrier(spanTextMap)); err != nil {
+				logger.Warnw(ctx, "unable-to-deserialize-textmap-to-span", log.Fields{"textMap": spanTextMap, "error": err})
+				break
+			}
+
+			var receivedRpcName string
+			extractBaggage := func(k, v string) bool {
+				if k == "rpc-span-name" {
+					receivedRpcName = v
+					return false
+				}
+				return true
+			}
+
+			spanContext.ForeachBaggageItem(extractBaggage)
+
+			return opentracing.StartSpanFromContext(ctx, receivedRpcName, opentracing.FollowsFrom(spanContext))
+		}
+	}
+
+	// Create new Child Span with rpc as name if no span details were received in kafka arguments
+	var spanName strings.Builder
+	spanName.WriteString("kafka-")
+
+	// In case of inter adapter message, use Msg Type for constructing RPC name
+	if rpcName == "process_inter_adapter_message" {
+		for _, arg := range args {
+			if arg.Key == "msg" {
+				iamsg := ic.InterAdapterMessage{}
+				if err := ptypes.UnmarshalAny(arg.Value, &iamsg); err == nil {
+					spanName.WriteString("inter-adapter-")
+					rpcName = iamsg.Header.Type.String()
+				}
+			}
+		}
+	}
+
+	spanName.WriteString("rpc-")
+	spanName.WriteString(rpcName)
+
+	return opentracing.StartSpanFromContext(ctx, spanName.String())
+}
+
+func (kp *interContainerProxy) handleMessage(ctx context.Context, msg *ic.InterContainerMessage, targetInterface interface{}) {
+
+	// First extract the header to know whether this is a request - responses are handled by a different handler
+	if msg.Header.Type == ic.MessageType_REQUEST {
+		var out []reflect.Value
+		var err error
+
+		// Get the request body
+		requestBody := &ic.InterContainerRequestBody{}
+		if err = ptypes.UnmarshalAny(msg.Body, requestBody); err != nil {
+			logger.Warnw(ctx, "cannot-unmarshal-request", log.Fields{"error": err})
+		} else {
+			span, ctx := kp.enrichContextWithSpan(ctx, requestBody.Rpc, requestBody.Args)
+			defer span.Finish()
+
+			logger.Debugw(ctx, "received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
+			// let the callee unpack the arguments as it's the only one that knows the real proto type
+			// Augment the requestBody with the message Id as it will be used in scenarios where cores
+			// are set in pairs and competing
+			requestBody.Args = kp.addTransactionId(ctx, msg.Header.Id, requestBody.Args)
+
+			// Augment the requestBody with the From topic name as it will be used in scenarios where a container
+			// needs to send an unsolicited message to the requesting container
+			requestBody.Args = kp.addFromTopic(ctx, msg.Header.FromTopic, requestBody.Args)
+
+			out, err = CallFuncByName(ctx, targetInterface, requestBody.Rpc, requestBody.Args)
+			if err != nil {
+				logger.Warn(ctx, err)
+			}
+		}
+		// Response required?
+		if requestBody.ResponseRequired {
+			// If we already have an error before then just return that
+			var returnError *ic.Error
+			var returnedValues []interface{}
+			var success bool
+			if err != nil {
+				returnError = &ic.Error{Reason: err.Error()}
+				returnedValues = make([]interface{}, 1)
+				returnedValues[0] = returnError
+			} else {
+				returnedValues = make([]interface{}, 0)
+				// Check for errors first
+				lastIndex := len(out) - 1
+				if out[lastIndex].Interface() != nil { // Error
+					if retError, ok := out[lastIndex].Interface().(error); ok {
+						if retError.Error() == ErrorTransactionNotAcquired.Error() {
+							logger.Debugw(ctx, "Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
+							return // Ignore - process is in competing mode and ignored transaction
+						}
+						returnError = &ic.Error{Reason: retError.Error()}
+						returnedValues = append(returnedValues, returnError)
+					} else { // Should never happen
+						returnError = &ic.Error{Reason: "incorrect-error-returns"}
+						returnedValues = append(returnedValues, returnError)
+					}
+				} else if len(out) == 2 && reflect.ValueOf(out[0].Interface()).IsValid() && reflect.ValueOf(out[0].Interface()).IsNil() {
+					logger.Warnw(ctx, "Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
+					return // Ignore - should not happen
+				} else { // Non-error case
+					success = true
+					for idx, val := range out {
+						//logger.Debugw(ctx, "returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
+						if idx != lastIndex {
+							returnedValues = append(returnedValues, val.Interface())
+						}
+					}
+				}
+			}
+
+			var icm *ic.InterContainerMessage
+			if icm, err = encodeResponse(ctx, msg, success, returnedValues...); err != nil {
+				logger.Warnw(ctx, "error-encoding-response-returning-failure-result", log.Fields{"error": err})
+				icm = encodeDefaultFailedResponse(ctx, msg)
+			}
+			// To preserve ordering of messages, all messages to a given topic are sent to the same partition
+			// by providing a message key.  The key is encoded in the topic name.  If the deviceId is not
+			// present then the key will be empty, and messages for the topic will be spread across all
+			// partitions.
+			replyTopic := &Topic{Name: msg.Header.FromTopic}
+			key := msg.Header.KeyTopic
+			logger.Debugw(ctx, "sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
+			// TODO: handle error response.
+			go func() {
+				if err := kp.kafkaClient.Send(ctx, icm, replyTopic, key); err != nil {
+					logger.Errorw(ctx, "send-reply-failed", log.Fields{
+						"topic": replyTopic,
+						"key":   key,
+						"error": err})
+				}
+			}()
+		}
+	} else if msg.Header.Type == ic.MessageType_RESPONSE {
+		logger.Debugw(ctx, "response-received", log.Fields{"msg-header": msg.Header})
+		go kp.dispatchResponse(ctx, msg)
+	} else {
+		logger.Warnw(ctx, "unsupported-message-received", log.Fields{"msg-header": msg.Header})
+	}
+}
+
+func (kp *interContainerProxy) waitForMessages(ctx context.Context, ch <-chan *ic.InterContainerMessage, topic Topic, targetInterface interface{}) {
+	//	Wait for messages
+	for msg := range ch {
+		//logger.Debugw(ctx, "request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
+		go kp.handleMessage(context.Background(), msg, targetInterface)
+	}
+}
+
+func (kp *interContainerProxy) dispatchResponse(ctx context.Context, msg *ic.InterContainerMessage) {
+	kp.lockTransactionIdToChannelMap.RLock()
+	defer kp.lockTransactionIdToChannelMap.RUnlock()
+	if _, exist := kp.transactionIdToChannelMap[msg.Header.Id]; !exist {
+		logger.Debugw(ctx, "no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
+		return
+	}
+	kp.transactionIdToChannelMap[msg.Header.Id].ch <- msg
+}
+
+// subscribeForResponse allows a caller to subscribe to a given topic when waiting for a response.
+// This method is built to prevent all subscribers from receiving all messages, as is the case with the Subscribe
+// API. There is one response channel waiting for kafka messages before dispatching the message to the
+// corresponding waiting channel
+func (kp *interContainerProxy) subscribeForResponse(ctx context.Context, topic Topic, trnsId string) (chan *ic.InterContainerMessage, error) {
+	logger.Debugw(ctx, "subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
+
+	// Create a specific channel for this consumer.  We cannot use the channel from the kafka client as it will
+	// broadcast any message for this topic to all channels waiting on it.
+	// Set channel size to 1 to prevent deadlock, see VOL-2708
+	ch := make(chan *ic.InterContainerMessage, 1)
+	kp.addToTransactionIdToChannelMap(trnsId, &topic, ch)
+
+	return ch, nil
+}
+
+func (kp *interContainerProxy) unSubscribeForResponse(ctx context.Context, trnsId string) error {
+	logger.Debugw(ctx, "unsubscribe-for-response", log.Fields{"trnsId": trnsId})
+	kp.deleteFromTransactionIdToChannelMap(trnsId)
+	return nil
+}
+
+func (kp *interContainerProxy) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+	return kp.kafkaClient.EnableLivenessChannel(ctx, enable)
+}
+
+func (kp *interContainerProxy) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
+	return kp.kafkaClient.EnableHealthinessChannel(ctx, enable)
+}
+
+func (kp *interContainerProxy) SendLiveness(ctx context.Context) error {
+	return kp.kafkaClient.SendLiveness(ctx)
+}
+
+//encodeRequest formats a request to send over kafka and returns an InterContainerMessage on success
+//or an error on failure
+func encodeRequest(ctx context.Context, rpc string, toTopic *Topic, replyTopic *Topic, key string, kvArgs ...*KVArg) (*ic.InterContainerMessage, error) {
+	requestHeader := &ic.Header{
+		Id:        uuid.New().String(),
+		Type:      ic.MessageType_REQUEST,
+		FromTopic: replyTopic.Name,
+		ToTopic:   toTopic.Name,
+		KeyTopic:  key,
+		Timestamp: ptypes.TimestampNow(),
+	}
+	requestBody := &ic.InterContainerRequestBody{
+		Rpc:              rpc,
+		ResponseRequired: true,
+		ReplyToTopic:     replyTopic.Name,
+	}
+
+	for _, arg := range kvArgs {
+		if arg == nil {
+			// In case the caller sends an array with empty args
+			continue
+		}
+		var marshalledArg *any.Any
+		var err error
+		// ascertain the value interface type is a proto.Message
+		protoValue, ok := arg.Value.(proto.Message)
+		if !ok {
+			logger.Warnw(ctx, "argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
+			err := errors.New("argument-value-not-proto-message")
+			return nil, err
+		}
+		if marshalledArg, err = ptypes.MarshalAny(protoValue); err != nil {
+			logger.Warnw(ctx, "cannot-marshal-request", log.Fields{"error": err})
+			return nil, err
+		}
+		protoArg := &ic.Argument{
+			Key:   arg.Key,
+			Value: marshalledArg,
+		}
+		requestBody.Args = append(requestBody.Args, protoArg)
+	}
+
+	var marshalledData *any.Any
+	var err error
+	if marshalledData, err = ptypes.MarshalAny(requestBody); err != nil {
+		logger.Warnw(ctx, "cannot-marshal-request", log.Fields{"error": err})
+		return nil, err
+	}
+	request := &ic.InterContainerMessage{
+		Header: requestHeader,
+		Body:   marshalledData,
+	}
+	return request, nil
+}
+
+func decodeResponse(ctx context.Context, response *ic.InterContainerMessage) (*ic.InterContainerResponseBody, error) {
+	//	Extract the message body
+	responseBody := ic.InterContainerResponseBody{}
+	if err := ptypes.UnmarshalAny(response.Body, &responseBody); err != nil {
+		logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		return nil, err
+	}
+	//logger.Debugw(ctx, "response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
+
+	return &responseBody, nil
+
+}
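
From the caller's side, InvokeAsyncRPC first delivers an RpcSent acknowledgement on the returned channel and then, when waitForResponse is true, a terminal reply. A minimal consumer sketch, assuming the RpcResponse shape (MType/Err/Reply) defined alongside this package, with illustrative topic and rpc names:

	// Sketch only: consume the response channel returned by InvokeAsyncRPC.
	func invokeDemo(ctx context.Context, kp InterContainerProxy) error {
		toTopic := &Topic{Name: "openolt"}        // illustrative
		replyTopic := &Topic{Name: "core-pair-1"} // illustrative
		for resp := range kp.InvokeAsyncRPC(ctx, "get_device", toTopic, replyTopic, true, "device-1234") {
			switch resp.MType {
			case RpcSent:
				continue // request is on the wire; keep waiting for the reply
			case RpcReply:
				return resp.Err // nil on success; resp.Reply carries the *any.Any result
			default:
				return resp.Err // timeout, transport error or shutdown
			}
		}
		return nil
	}
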
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/sarama_client.go
similarity index 69%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/sarama_client.go
index 581cf49..1e4efae 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/sarama_client.go
@@ -29,8 +29,8 @@
 	"github.com/golang/protobuf/proto"
 	"github.com/golang/protobuf/ptypes"
 	"github.com/google/uuid"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	ic "github.com/opencord/voltha-protos/v4/go/inter_container"
 )
 
 // consumerChannels represents one or more consumers listening on a kafka topic.  Once a message is received on that
@@ -75,10 +75,12 @@
 	lockOfTopicLockMap            sync.RWMutex
 	metadataMaxRetry              int
 	alive                         bool
+	livenessMutex                 sync.Mutex
 	liveness                      chan bool
 	livenessChannelInterval       time.Duration
 	lastLivenessTime              time.Time
 	started                       bool
+	healthinessMutex              sync.Mutex
 	healthy                       bool
 	healthiness                   chan bool
 }
@@ -231,8 +233,8 @@
 	return client
 }
 
-func (sc *SaramaClient) Start() error {
-	logger.Info("Starting-kafka-sarama-client")
+func (sc *SaramaClient) Start(ctx context.Context) error {
+	logger.Info(ctx, "Starting-kafka-sarama-client")
 
 	// Create the Done channel
 	sc.doneCh = make(chan int, 1)
@@ -242,26 +244,26 @@
 	// Add a cleanup in case of failure to startup
 	defer func() {
 		if err != nil {
-			sc.Stop()
+			sc.Stop(ctx)
 		}
 	}()
 
 	// Create the Cluster Admin
-	if err = sc.createClusterAdmin(); err != nil {
-		logger.Errorw("Cannot-create-cluster-admin", log.Fields{"error": err})
+	if err = sc.createClusterAdmin(ctx); err != nil {
+		logger.Errorw(ctx, "Cannot-create-cluster-admin", log.Fields{"error": err})
 		return err
 	}
 
 	// Create the Publisher
-	if err := sc.createPublisher(); err != nil {
-		logger.Errorw("Cannot-create-kafka-publisher", log.Fields{"error": err})
+	if err := sc.createPublisher(ctx); err != nil {
+		logger.Errorw(ctx, "Cannot-create-kafka-publisher", log.Fields{"error": err})
 		return err
 	}
 
 	if sc.consumerType == DefaultConsumerType {
 		// Create the master consumers
-		if err := sc.createConsumer(); err != nil {
-			logger.Errorw("Cannot-create-kafka-consumers", log.Fields{"error": err})
+		if err := sc.createConsumer(ctx); err != nil {
+			logger.Errorw(ctx, "Cannot-create-kafka-consumers", log.Fields{"error": err})
 			return err
 		}
 	}
@@ -269,15 +271,15 @@
 	// Create the topic to consumers/channel map
 	sc.topicToConsumerChannelMap = make(map[string]*consumerChannels)
 
-	logger.Info("kafka-sarama-client-started")
+	logger.Info(ctx, "kafka-sarama-client-started")
 
 	sc.started = true
 
 	return nil
 }
 
-func (sc *SaramaClient) Stop() {
-	logger.Info("stopping-sarama-client")
+func (sc *SaramaClient) Stop(ctx context.Context) {
+	logger.Info(ctx, "stopping-sarama-client")
 
 	sc.started = false
 
@@ -286,38 +288,38 @@
 
 	if sc.producer != nil {
 		if err := sc.producer.Close(); err != nil {
-			logger.Errorw("closing-producer-failed", log.Fields{"error": err})
+			logger.Errorw(ctx, "closing-producer-failed", log.Fields{"error": err})
 		}
 	}
 
 	if sc.consumer != nil {
 		if err := sc.consumer.Close(); err != nil {
-			logger.Errorw("closing-partition-consumer-failed", log.Fields{"error": err})
+			logger.Errorw(ctx, "closing-partition-consumer-failed", log.Fields{"error": err})
 		}
 	}
 
 	for key, val := range sc.groupConsumers {
-		logger.Debugw("closing-group-consumer", log.Fields{"topic": key})
+		logger.Debugw(ctx, "closing-group-consumer", log.Fields{"topic": key})
 		if err := val.Close(); err != nil {
-			logger.Errorw("closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
+			logger.Errorw(ctx, "closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
 		}
 	}
 
 	if sc.cAdmin != nil {
 		if err := sc.cAdmin.Close(); err != nil {
-			logger.Errorw("closing-cluster-admin-failed", log.Fields{"error": err})
+			logger.Errorw(ctx, "closing-cluster-admin-failed", log.Fields{"error": err})
 		}
 	}
 
 	//TODO: Clear the consumers map
 	//sc.clearConsumerChannelMap()
 
-	logger.Info("sarama-client-stopped")
+	logger.Info(ctx, "sarama-client-stopped")
 }
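
The Start/Stop lifecycle now threads a context end to end. A hedged usage sketch (the broker address, the Address option name, and the topic are placeholders; see NewSaramaClient's functional options for the actual constructor arguments):

func exampleLifecycle(ctx context.Context) error {
	client := NewSaramaClient(Address("kafka:9092")) // Address option assumed
	if err := client.Start(ctx); err != nil {
		return err
	}
	defer client.Stop(ctx)
	return client.CreateTopic(ctx, &Topic{Name: "demo-topic"}, 1, 1)
}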
 
 //createTopic is an internal function to create a topic on the Kafka Broker. No locking is required as
 // the invoking function must hold the lock
-func (sc *SaramaClient) createTopic(topic *Topic, numPartition int, repFactor int) error {
+func (sc *SaramaClient) createTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error {
 	// Set the topic details
 	topicDetail := &sarama.TopicDetail{}
 	topicDetail.NumPartitions = int32(numPartition)
@@ -329,29 +331,29 @@
 	if err := sc.cAdmin.CreateTopic(topic.Name, topicDetail, false); err != nil {
 		if err == sarama.ErrTopicAlreadyExists {
 			//	Not an error
-			logger.Debugw("topic-already-exist", log.Fields{"topic": topic.Name})
+			logger.Debugw(ctx, "topic-already-exist", log.Fields{"topic": topic.Name})
 			return nil
 		}
-		logger.Errorw("create-topic-failure", log.Fields{"error": err})
+		logger.Errorw(ctx, "create-topic-failure", log.Fields{"error": err})
 		return err
 	}
 	// TODO: Wait until the topic has been created.  No API is available in the Sarama clusterAdmin to
 	// do so.
-	logger.Debugw("topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
+	logger.Debugw(ctx, "topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
 	return nil
 }
 
 //CreateTopic is a public API to create a topic on the Kafka Broker.  It uses a lock on a specific topic to
 // ensure no two go routines are performing operations on the same topic
-func (sc *SaramaClient) CreateTopic(topic *Topic, numPartition int, repFactor int) error {
+func (sc *SaramaClient) CreateTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
-	return sc.createTopic(topic, numPartition, repFactor)
+	return sc.createTopic(ctx, topic, numPartition, repFactor)
 }
 
 //DeleteTopic removes a topic from the kafka Broker
-func (sc *SaramaClient) DeleteTopic(topic *Topic) error {
+func (sc *SaramaClient) DeleteTopic(ctx context.Context, topic *Topic) error {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
@@ -359,16 +361,16 @@
 	if err := sc.cAdmin.DeleteTopic(topic.Name); err != nil {
 		if err == sarama.ErrUnknownTopicOrPartition {
 			//	Not an error, as the topic does not exist
-			logger.Debugw("topic-not-exist", log.Fields{"topic": topic.Name})
+			logger.Debugw(ctx, "topic-not-exist", log.Fields{"topic": topic.Name})
 			return nil
 		}
-		logger.Errorw("delete-topic-failed", log.Fields{"topic": topic, "error": err})
+		logger.Errorw(ctx, "delete-topic-failed", log.Fields{"topic": topic, "error": err})
 		return err
 	}
 
 	// Clear the topic from the consumer channel.  This will also close any consumers listening on that topic.
-	if err := sc.clearTopicFromConsumerChannelMap(*topic); err != nil {
-		logger.Errorw("failure-clearing-channels", log.Fields{"topic": topic, "error": err})
+	if err := sc.clearTopicFromConsumerChannelMap(ctx, *topic); err != nil {
+		logger.Errorw(ctx, "failure-clearing-channels", log.Fields{"topic": topic, "error": err})
 		return err
 	}
 	return nil
@@ -376,18 +378,18 @@
 
 // Subscribe registers a caller to a topic. It returns a channel that the caller can use to receive
 // messages from that topic
-func (sc *SaramaClient) Subscribe(topic *Topic, kvArgs ...*KVArg) (<-chan *ic.InterContainerMessage, error) {
+func (sc *SaramaClient) Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan *ic.InterContainerMessage, error) {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
-	logger.Debugw("subscribe", log.Fields{"topic": topic.Name})
+	logger.Debugw(ctx, "subscribe", log.Fields{"topic": topic.Name})
 
 	// If a consumer already exists for that topic then reuse it
 	if consumerCh := sc.getConsumerChannel(topic); consumerCh != nil {
-		logger.Debugw("topic-already-subscribed", log.Fields{"topic": topic.Name})
+		logger.Debugw(ctx, "topic-already-subscribed", log.Fields{"topic": topic.Name})
 		// Create a channel specific to that consumer and add it to the consumers channel map
 		ch := make(chan *ic.InterContainerMessage)
-		sc.addChannelToConsumerChannelMap(topic, ch)
+		sc.addChannelToConsumerChannelMap(ctx, topic, ch)
 		return ch, nil
 	}
 
@@ -398,13 +400,13 @@
 	// Use the consumerType option to figure out the type of consumer to launch
 	if sc.consumerType == PartitionConsumer {
 		if sc.autoCreateTopic {
-			if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
-				logger.Errorw("create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
+			if err = sc.createTopic(ctx, topic, sc.numPartitions, sc.numReplicas); err != nil {
+				logger.Errorw(ctx, "create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
 				return nil, err
 			}
 		}
-		if consumerListeningChannel, err = sc.setupPartitionConsumerChannel(topic, getOffset(kvArgs...)); err != nil {
-			logger.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
+		if consumerListeningChannel, err = sc.setupPartitionConsumerChannel(ctx, topic, getOffset(kvArgs...)); err != nil {
+			logger.Warnw(ctx, "create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
 			return nil, err
 		}
 	} else if sc.consumerType == GroupCustomer {
@@ -412,7 +414,7 @@
 		// does not consume from a precreated topic in some scenarios
 		//if sc.autoCreateTopic {
 		//	if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
-		//		logger.Errorw("create-topic-failure", logger.Fields{"error": err, "topic": topic.Name})
+		//		logger.Errorw(ctx, "create-topic-failure", logger.Fields{"error": err, "topic": topic.Name})
 		//		return nil, err
 		//	}
 		//}
@@ -425,13 +427,13 @@
 			// Need to use a unique group Id per topic
 			groupId = sc.consumerGroupPrefix + topic.Name
 		}
-		if consumerListeningChannel, err = sc.setupGroupConsumerChannel(topic, groupId, getOffset(kvArgs...)); err != nil {
-			logger.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+		if consumerListeningChannel, err = sc.setupGroupConsumerChannel(ctx, topic, groupId, getOffset(kvArgs...)); err != nil {
+			logger.Warnw(ctx, "create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
 			return nil, err
 		}
 
 	} else {
-		logger.Warnw("unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
+		logger.Warnw(ctx, "unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
 		return nil, errors.New("unknown-consumer-type")
 	}
 
@@ -439,37 +441,39 @@
 }
 
 //UnSubscribe unsubscribe a consumer from a given topic
-func (sc *SaramaClient) UnSubscribe(topic *Topic, ch <-chan *ic.InterContainerMessage) error {
+func (sc *SaramaClient) UnSubscribe(ctx context.Context, topic *Topic, ch <-chan *ic.InterContainerMessage) error {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
-	logger.Debugw("unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
+	logger.Debugw(ctx, "unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
 	var err error
-	if err = sc.removeChannelFromConsumerChannelMap(*topic, ch); err != nil {
-		logger.Errorw("failed-removing-channel", log.Fields{"error": err})
+	if err = sc.removeChannelFromConsumerChannelMap(ctx, *topic, ch); err != nil {
+		logger.Errorw(ctx, "failed-removing-channel", log.Fields{"error": err})
 	}
-	if err = sc.deleteFromGroupConsumers(topic.Name); err != nil {
-		logger.Errorw("failed-deleting-group-consumer", log.Fields{"error": err})
+	if err = sc.deleteFromGroupConsumers(ctx, topic.Name); err != nil {
+		logger.Errorw(ctx, "failed-deleting-group-consumer", log.Fields{"error": err})
 	}
 	return err
 }
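
A hypothetical consumer built on the new signatures: subscribe, drain until the context is cancelled, then unsubscribe. The topic name is made up.

func exampleConsume(ctx context.Context, sc *SaramaClient) error {
	topic := &Topic{Name: "demo-topic"}
	ch, err := sc.Subscribe(ctx, topic)
	if err != nil {
		return err
	}
	defer func() { _ = sc.UnSubscribe(ctx, topic, ch) }()
	for {
		select {
		case msg, ok := <-ch:
			if !ok {
				return nil // channel closed by the client
			}
			logger.Debugw(ctx, "message-received", log.Fields{"id": msg.Header.Id})
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}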
 
-func (sc *SaramaClient) SubscribeForMetadata(callback func(fromTopic string, timestamp time.Time)) {
+func (sc *SaramaClient) SubscribeForMetadata(ctx context.Context, callback func(fromTopic string, timestamp time.Time)) {
 	sc.metadataCallback = callback
 }
 
-func (sc *SaramaClient) updateLiveness(alive bool) {
+func (sc *SaramaClient) updateLiveness(ctx context.Context, alive bool) {
 	// Post a consistent stream of liveness data to the channel,
 	// so that in a live state, the core does not timeout and
 	// send a forced liveness message. Production of liveness
 	// events to the channel is rate-limited by livenessChannelInterval.
+	sc.livenessMutex.Lock()
+	defer sc.livenessMutex.Unlock()
 	if sc.liveness != nil {
 		if sc.alive != alive {
-			logger.Info("update-liveness-channel-because-change")
+			logger.Info(ctx, "update-liveness-channel-because-change")
 			sc.liveness <- alive
 			sc.lastLivenessTime = time.Now()
 		} else if time.Since(sc.lastLivenessTime) > sc.livenessChannelInterval {
-			logger.Info("update-liveness-channel-because-interval")
+			logger.Info(ctx, "update-liveness-channel-because-interval")
 			sc.liveness <- alive
 			sc.lastLivenessTime = time.Now()
 		}
@@ -477,21 +481,23 @@
 
 	// Only emit a log message when the state changes
 	if sc.alive != alive {
-		logger.Info("set-client-alive", log.Fields{"alive": alive})
+		logger.Info(ctx, "set-client-alive", log.Fields{"alive": alive})
 		sc.alive = alive
 	}
 }
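
The new livenessMutex exists because the liveness channel is created lazily (in EnableLivenessChannel, further below) while updateLiveness posts to it from producer and consumer goroutines. A stand-alone sketch of the pattern being enforced:

type statusReporter struct {
	mu sync.Mutex
	ch chan bool
}

func (r *statusReporter) enable() chan bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.ch == nil {
		r.ch = make(chan bool, 10) // buffered so posting rarely blocks
	}
	return r.ch
}

func (r *statusReporter) post(alive bool) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.ch != nil {
		r.ch <- alive
	}
}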
 
 // Once unhealthy, we never go back
-func (sc *SaramaClient) setUnhealthy() {
+func (sc *SaramaClient) setUnhealthy(ctx context.Context) {
 	sc.healthy = false
+	sc.healthinessMutex.Lock()
+	defer sc.healthinessMutex.Unlock()
 	if sc.healthiness != nil {
-		logger.Infow("set-client-unhealthy", log.Fields{"healthy": sc.healthy})
+		logger.Infow(ctx, "set-client-unhealthy", log.Fields{"healthy": sc.healthy})
 		sc.healthiness <- sc.healthy
 	}
 }
 
-func (sc *SaramaClient) isLivenessError(err error) bool {
+func (sc *SaramaClient) isLivenessError(ctx context.Context, err error) bool {
 	// Sarama producers and consumers encapsulate the error inside
 	// a ProducerError or ConsumerError struct.
 	if prodError, ok := err.(*sarama.ProducerError); ok {
@@ -506,48 +512,48 @@
 
 	switch err.Error() {
 	case context.DeadlineExceeded.Error():
-		logger.Info("is-liveness-error-timeout")
+		logger.Info(ctx, "is-liveness-error-timeout")
 		return true
 	case sarama.ErrOutOfBrokers.Error(): // "Kafka: client has run out of available brokers"
-		logger.Info("is-liveness-error-no-brokers")
+		logger.Info(ctx, "is-liveness-error-no-brokers")
 		return true
 	case sarama.ErrShuttingDown.Error(): // "Kafka: message received by producer in process of shutting down"
-		logger.Info("is-liveness-error-shutting-down")
+		logger.Info(ctx, "is-liveness-error-shutting-down")
 		return true
 	case sarama.ErrControllerNotAvailable.Error(): // "Kafka: controller is not available"
-		logger.Info("is-liveness-error-not-available")
+		logger.Info(ctx, "is-liveness-error-not-available")
 		return true
 	case breaker.ErrBreakerOpen.Error(): // "circuit breaker is open"
-		logger.Info("is-liveness-error-circuit-breaker-open")
+		logger.Info(ctx, "is-liveness-error-circuit-breaker-open")
 		return true
 	}
 
 	if strings.HasSuffix(err.Error(), "connection refused") { // "dial tcp 10.244.1.176:9092: connect: connection refused"
-		logger.Info("is-liveness-error-connection-refused")
+		logger.Info(ctx, "is-liveness-error-connection-refused")
 		return true
 	}
 
 	if strings.HasSuffix(err.Error(), "i/o timeout") { // "dial tcp 10.244.1.176:9092: i/o timeout"
-		logger.Info("is-liveness-error-io-timeout")
+		logger.Info(ctx, "is-liveness-error-io-timeout")
 		return true
 	}
 
 	// Other errors shouldn't trigger a loss of liveness
 
-	logger.Infow("is-liveness-error-ignored", log.Fields{"err": err})
+	logger.Infow(ctx, "is-liveness-error-ignored", log.Fields{"err": err})
 
 	return false
 }
 
 // Send formats and sends the request onto the kafka messaging bus.
-func (sc *SaramaClient) Send(msg interface{}, topic *Topic, keys ...string) error {
+func (sc *SaramaClient) Send(ctx context.Context, msg interface{}, topic *Topic, keys ...string) error {
 
 	// Assert message is a proto message
 	var protoMsg proto.Message
 	var ok bool
 	// ascertain the value interface type is a proto.Message
 	if protoMsg, ok = msg.(proto.Message); !ok {
-		logger.Warnw("message-not-proto-message", log.Fields{"msg": msg})
+		logger.Warnw(ctx, "message-not-proto-message", log.Fields{"msg": msg})
 		return fmt.Errorf("not-a-proto-msg-%s", msg)
 	}
 
@@ -555,7 +561,7 @@
 	var err error
 	//	Create the Sarama producer message
 	if marshalled, err = proto.Marshal(protoMsg); err != nil {
-		logger.Errorw("marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
+		logger.Errorw(ctx, "marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
 		return err
 	}
 	key := ""
@@ -574,12 +580,12 @@
 	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
 	select {
 	case ok := <-sc.producer.Successes():
-		logger.Debugw("message-sent", log.Fields{"status": ok.Topic})
-		sc.updateLiveness(true)
+		logger.Debugw(ctx, "message-sent", log.Fields{"status": ok.Topic})
+		sc.updateLiveness(ctx, true)
 	case notOk := <-sc.producer.Errors():
-		logger.Debugw("error-sending", log.Fields{"status": notOk})
-		if sc.isLivenessError(notOk) {
-			sc.updateLiveness(false)
+		logger.Debugw(ctx, "error-sending", log.Fields{"status": notOk})
+		if sc.isLivenessError(ctx, notOk) {
+			sc.updateLiveness(ctx, false)
 		}
 		return notOk
 	}
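
Caller-side, Send now takes the context first; a minimal sketch with a made-up topic and partition key:

func exampleSend(ctx context.Context, sc *SaramaClient) error {
	msg := &ic.InterContainerMessage{Header: &ic.Header{Id: "msg-1"}}
	return sc.Send(ctx, msg, &Topic{Name: "demo-topic"}, "partition-key")
}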
@@ -591,11 +597,13 @@
 // or not the channel is still live. This channel is then picked up
 // by the service (i.e. rw_core / ro_core) to update readiness status
 // and/or take other actions.
-func (sc *SaramaClient) EnableLivenessChannel(enable bool) chan bool {
-	logger.Infow("kafka-enable-liveness-channel", log.Fields{"enable": enable})
+func (sc *SaramaClient) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
+	logger.Infow(ctx, "kafka-enable-liveness-channel", log.Fields{"enable": enable})
 	if enable {
+		sc.livenessMutex.Lock()
+		defer sc.livenessMutex.Unlock()
 		if sc.liveness == nil {
-			logger.Info("kafka-create-liveness-channel")
+			logger.Info(ctx, "kafka-create-liveness-channel")
 			// At least 1, so we can immediately post to it without blocking
 			// Setting a bigger number (10) allows the monitor to fall behind
 			// without blocking others. The monitor shouldn't really fall
@@ -615,11 +623,13 @@
 // Enable the Healthiness monitor channel. This channel will report "false"
 // if the kafka consumers die, or some other catastrophic problem occurs
 // that would require re-creating the client.
-func (sc *SaramaClient) EnableHealthinessChannel(enable bool) chan bool {
-	logger.Infow("kafka-enable-healthiness-channel", log.Fields{"enable": enable})
+func (sc *SaramaClient) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
+	logger.Infow(ctx, "kafka-enable-healthiness-channel", log.Fields{"enable": enable})
 	if enable {
+		sc.healthinessMutex.Lock()
+		defer sc.healthinessMutex.Unlock()
 		if sc.healthiness == nil {
-			logger.Info("kafka-create-healthiness-channel")
+			logger.Info(ctx, "kafka-create-healthiness-channel")
 			// At least 1, so we can immediately post to it without blocking
 			// Setting a bigger number (10) allows the monitor to fall behind
 			// without blocking others. The monitor shouldn't really fall
@@ -638,7 +648,7 @@
 
 // send an empty message on the liveness channel to check whether connectivity has
 // been restored.
-func (sc *SaramaClient) SendLiveness() error {
+func (sc *SaramaClient) SendLiveness(ctx context.Context) error {
 	if !sc.started {
 		return fmt.Errorf("SendLiveness() called while not started")
 	}
@@ -654,12 +664,12 @@
 	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
 	select {
 	case ok := <-sc.producer.Successes():
-		logger.Debugw("liveness-message-sent", log.Fields{"status": ok.Topic})
-		sc.updateLiveness(true)
+		logger.Debugw(ctx, "liveness-message-sent", log.Fields{"status": ok.Topic})
+		sc.updateLiveness(ctx, true)
 	case notOk := <-sc.producer.Errors():
-		logger.Debugw("liveness-error-sending", log.Fields{"status": notOk})
-		if sc.isLivenessError(notOk) {
-			sc.updateLiveness(false)
+		logger.Debugw(ctx, "liveness-error-sending", log.Fields{"status": notOk})
+		if sc.isLivenessError(ctx, notOk) {
+			sc.updateLiveness(ctx, false)
 		}
 		return notOk
 	}
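
A service would typically pair EnableLivenessChannel with a periodic SendLiveness probe. A hedged sketch (the interval and logging are illustrative):

func monitorLiveness(ctx context.Context, sc *SaramaClient) {
	ch := sc.EnableLivenessChannel(ctx, true)
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case alive := <-ch:
			logger.Infow(ctx, "kafka-liveness", log.Fields{"alive": alive})
		case <-ticker.C:
			_ = sc.SendLiveness(ctx) // probe; send errors already update liveness
		case <-ctx.Done():
			return
		}
	}
}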
@@ -686,7 +696,7 @@
 	return sarama.OffsetNewest
 }
 
-func (sc *SaramaClient) createClusterAdmin() error {
+func (sc *SaramaClient) createClusterAdmin(ctx context.Context) error {
 	config := sarama.NewConfig()
 	config.Version = sarama.V1_0_0_0
 
@@ -694,7 +704,7 @@
 	var cAdmin sarama.ClusterAdmin
 	var err error
 	if cAdmin, err = sarama.NewClusterAdmin([]string{sc.KafkaAddress}, config); err != nil {
-		logger.Errorw("cluster-admin-failure", log.Fields{"error": err, "broker-address": sc.KafkaAddress})
+		logger.Errorw(ctx, "cluster-admin-failure", log.Fields{"error": err, "broker-address": sc.KafkaAddress})
 		return err
 	}
 	sc.cAdmin = cAdmin
@@ -739,24 +749,24 @@
 	return nil
 }
 
-func (sc *SaramaClient) addChannelToConsumerChannelMap(topic *Topic, ch chan *ic.InterContainerMessage) {
+func (sc *SaramaClient) addChannelToConsumerChannelMap(ctx context.Context, topic *Topic, ch chan *ic.InterContainerMessage) {
 	sc.lockTopicToConsumerChannelMap.Lock()
 	defer sc.lockTopicToConsumerChannelMap.Unlock()
 	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
 		consumerCh.channels = append(consumerCh.channels, ch)
 		return
 	}
-	logger.Warnw("consumers-channel-not-exist", log.Fields{"topic": topic.Name})
+	logger.Warnw(ctx, "consumers-channel-not-exist", log.Fields{"topic": topic.Name})
 }
 
 //closeConsumers closes a list of sarama consumers.  The consumers can be either partition consumers or group consumers
-func closeConsumers(consumers []interface{}) error {
+func closeConsumers(ctx context.Context, consumers []interface{}) error {
 	var err error
 	for _, consumer := range consumers {
 		//	Is it a partition consumer?
 		if partionConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
 			if errTemp := partionConsumer.Close(); errTemp != nil {
-				logger.Debugw("partition!!!", log.Fields{"err": errTemp})
+				logger.Debugw(ctx, "partition!!!", log.Fields{"err": errTemp})
 				if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
 					// This can occur due to a race condition
 					err = nil
@@ -778,35 +788,35 @@
 	return err
 }
 
-func (sc *SaramaClient) removeChannelFromConsumerChannelMap(topic Topic, ch <-chan *ic.InterContainerMessage) error {
+func (sc *SaramaClient) removeChannelFromConsumerChannelMap(ctx context.Context, topic Topic, ch <-chan *ic.InterContainerMessage) error {
 	sc.lockTopicToConsumerChannelMap.Lock()
 	defer sc.lockTopicToConsumerChannelMap.Unlock()
 	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
 		// Channel will be closed in the removeChannel method
-		consumerCh.channels = removeChannel(consumerCh.channels, ch)
+		consumerCh.channels = removeChannel(ctx, consumerCh.channels, ch)
 		// If there are no more channels then we can close the consumer itself
 		if len(consumerCh.channels) == 0 {
-			logger.Debugw("closing-consumers", log.Fields{"topic": topic})
-			err := closeConsumers(consumerCh.consumers)
+			logger.Debugw(ctx, "closing-consumers", log.Fields{"topic": topic})
+			err := closeConsumers(ctx, consumerCh.consumers)
 			//err := consumerCh.consumers.Close()
 			delete(sc.topicToConsumerChannelMap, topic.Name)
 			return err
 		}
 		return nil
 	}
-	logger.Warnw("topic-does-not-exist", log.Fields{"topic": topic.Name})
+	logger.Warnw(ctx, "topic-does-not-exist", log.Fields{"topic": topic.Name})
 	return errors.New("topic-does-not-exist")
 }
 
-func (sc *SaramaClient) clearTopicFromConsumerChannelMap(topic Topic) error {
+func (sc *SaramaClient) clearTopicFromConsumerChannelMap(ctx context.Context, topic Topic) error {
 	sc.lockTopicToConsumerChannelMap.Lock()
 	defer sc.lockTopicToConsumerChannelMap.Unlock()
 	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
 		for _, ch := range consumerCh.channels {
 			// Channel will be closed in the removeChannel method
-			removeChannel(consumerCh.channels, ch)
+			removeChannel(ctx, consumerCh.channels, ch)
 		}
-		err := closeConsumers(consumerCh.consumers)
+		err := closeConsumers(ctx, consumerCh.consumers)
 		//if err == sarama.ErrUnknownTopicOrPartition {
 		//	// Not an error
 		//	err = nil
@@ -815,12 +825,12 @@
 		delete(sc.topicToConsumerChannelMap, topic.Name)
 		return err
 	}
-	logger.Debugw("topic-does-not-exist", log.Fields{"topic": topic.Name})
+	logger.Debugw(ctx, "topic-does-not-exist", log.Fields{"topic": topic.Name})
 	return nil
 }
 
 //createPublisher creates the publisher which is used to send a message onto kafka
-func (sc *SaramaClient) createPublisher() error {
+func (sc *SaramaClient) createPublisher(ctx context.Context) error {
 	// This Creates the publisher
 	config := sarama.NewConfig()
 	config.Producer.Partitioner = sarama.NewRandomPartitioner
@@ -835,16 +845,16 @@
 	brokers := []string{sc.KafkaAddress}
 
 	if producer, err := sarama.NewAsyncProducer(brokers, config); err != nil {
-		logger.Errorw("error-starting-publisher", log.Fields{"error": err})
+		logger.Errorw(ctx, "error-starting-publisher", log.Fields{"error": err})
 		return err
 	} else {
 		sc.producer = producer
 	}
-	logger.Info("Kafka-publisher-created")
+	logger.Info(ctx, "Kafka-publisher-created")
 	return nil
 }
 
-func (sc *SaramaClient) createConsumer() error {
+func (sc *SaramaClient) createConsumer(ctx context.Context) error {
 	config := sarama.NewConfig()
 	config.Consumer.Return.Errors = true
 	config.Consumer.Fetch.Min = 1
@@ -855,17 +865,17 @@
 	brokers := []string{sc.KafkaAddress}
 
 	if consumer, err := sarama.NewConsumer(brokers, config); err != nil {
-		logger.Errorw("error-starting-consumers", log.Fields{"error": err})
+		logger.Errorw(ctx, "error-starting-consumers", log.Fields{"error": err})
 		return err
 	} else {
 		sc.consumer = consumer
 	}
-	logger.Info("Kafka-consumers-created")
+	logger.Info(ctx, "Kafka-consumers-created")
 	return nil
 }
 
 // createGroupConsumer creates a consumer group
-func (sc *SaramaClient) createGroupConsumer(topic *Topic, groupId string, initialOffset int64, retries int) (*scc.Consumer, error) {
+func (sc *SaramaClient) createGroupConsumer(ctx context.Context, topic *Topic, groupId string, initialOffset int64, retries int) (*scc.Consumer, error) {
 	config := scc.NewConfig()
 	config.ClientID = uuid.New().String()
 	config.Group.Mode = scc.ConsumerModeMultiplex
@@ -883,10 +893,10 @@
 	var err error
 
 	if consumer, err = scc.NewConsumer(brokers, groupId, topics, config); err != nil {
-		logger.Errorw("create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+		logger.Errorw(ctx, "create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
 		return nil, err
 	}
-	logger.Debugw("create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
+	logger.Debugw(ctx, "create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
 
 	//sc.groupConsumers[topic.Name] = consumer
 	sc.addToGroupConsumers(topic.Name, consumer)
@@ -911,104 +921,104 @@
 	}
 }
 
-func (sc *SaramaClient) consumeFromAPartition(topic *Topic, consumer sarama.PartitionConsumer, consumerChnls *consumerChannels) {
-	logger.Debugw("starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
+func (sc *SaramaClient) consumeFromAPartition(ctx context.Context, topic *Topic, consumer sarama.PartitionConsumer, consumerChnls *consumerChannels) {
+	logger.Debugw(ctx, "starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
 startloop:
 	for {
 		select {
 		case err, ok := <-consumer.Errors():
 			if ok {
-				if sc.isLivenessError(err) {
-					sc.updateLiveness(false)
-					logger.Warnw("partition-consumers-error", log.Fields{"error": err})
+				if sc.isLivenessError(ctx, err) {
+					sc.updateLiveness(ctx, false)
+					logger.Warnw(ctx, "partition-consumers-error", log.Fields{"error": err})
 				}
 			} else {
 				// Channel is closed
 				break startloop
 			}
 		case msg, ok := <-consumer.Messages():
-			//logger.Debugw("message-received", logger.Fields{"msg": msg, "receivedTopic": msg.Topic})
+			//logger.Debugw(ctx, "message-received", logger.Fields{"msg": msg, "receivedTopic": msg.Topic})
 			if !ok {
 				// channel is closed
 				break startloop
 			}
 			msgBody := msg.Value
-			sc.updateLiveness(true)
-			logger.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			sc.updateLiveness(ctx, true)
+			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
 			icm := &ic.InterContainerMessage{}
 			if err := proto.Unmarshal(msgBody, icm); err != nil {
-				logger.Warnw("partition-invalid-message", log.Fields{"error": err})
+				logger.Warnw(ctx, "partition-invalid-message", log.Fields{"error": err})
 				continue
 			}
 			go sc.dispatchToConsumers(consumerChnls, icm)
 		case <-sc.doneCh:
-			logger.Infow("partition-received-exit-signal", log.Fields{"topic": topic.Name})
+			logger.Infow(ctx, "partition-received-exit-signal", log.Fields{"topic": topic.Name})
 			break startloop
 		}
 	}
-	logger.Infow("partition-consumer-stopped", log.Fields{"topic": topic.Name})
-	sc.setUnhealthy()
+	logger.Infow(ctx, "partition-consumer-stopped", log.Fields{"topic": topic.Name})
+	sc.setUnhealthy(ctx)
 }
 
-func (sc *SaramaClient) consumeGroupMessages(topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
-	logger.Debugw("starting-group-consumption-loop", log.Fields{"topic": topic.Name})
+func (sc *SaramaClient) consumeGroupMessages(ctx context.Context, topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
+	logger.Debugw(ctx, "starting-group-consumption-loop", log.Fields{"topic": topic.Name})
 
 startloop:
 	for {
 		select {
 		case err, ok := <-consumer.Errors():
 			if ok {
-				if sc.isLivenessError(err) {
-					sc.updateLiveness(false)
+				if sc.isLivenessError(ctx, err) {
+					sc.updateLiveness(ctx, false)
 				}
-				logger.Warnw("group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
+				logger.Warnw(ctx, "group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
 			} else {
-				logger.Warnw("group-consumers-closed-err", log.Fields{"topic": topic.Name})
+				logger.Warnw(ctx, "group-consumers-closed-err", log.Fields{"topic": topic.Name})
 				// channel is closed
 				break startloop
 			}
 		case msg, ok := <-consumer.Messages():
 			if !ok {
-				logger.Warnw("group-consumers-closed-msg", log.Fields{"topic": topic.Name})
+				logger.Warnw(ctx, "group-consumers-closed-msg", log.Fields{"topic": topic.Name})
 				// Channel closed
 				break startloop
 			}
-			sc.updateLiveness(true)
-			logger.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			sc.updateLiveness(ctx, true)
+			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
 			msgBody := msg.Value
 			icm := &ic.InterContainerMessage{}
 			if err := proto.Unmarshal(msgBody, icm); err != nil {
-				logger.Warnw("invalid-message", log.Fields{"error": err})
+				logger.Warnw(ctx, "invalid-message", log.Fields{"error": err})
 				continue
 			}
 			go sc.dispatchToConsumers(consumerChnls, icm)
 			consumer.MarkOffset(msg, "")
 		case ntf := <-consumer.Notifications():
-			logger.Debugw("group-received-notification", log.Fields{"notification": ntf})
+			logger.Debugw(ctx, "group-received-notification", log.Fields{"notification": ntf})
 		case <-sc.doneCh:
-			logger.Infow("group-received-exit-signal", log.Fields{"topic": topic.Name})
+			logger.Infow(ctx, "group-received-exit-signal", log.Fields{"topic": topic.Name})
 			break startloop
 		}
 	}
-	logger.Infow("group-consumer-stopped", log.Fields{"topic": topic.Name})
-	sc.setUnhealthy()
+	logger.Infow(ctx, "group-consumer-stopped", log.Fields{"topic": topic.Name})
+	sc.setUnhealthy(ctx)
 }
 
-func (sc *SaramaClient) startConsumers(topic *Topic) error {
-	logger.Debugw("starting-consumers", log.Fields{"topic": topic.Name})
+func (sc *SaramaClient) startConsumers(ctx context.Context, topic *Topic) error {
+	logger.Debugw(ctx, "starting-consumers", log.Fields{"topic": topic.Name})
 	var consumerCh *consumerChannels
 	if consumerCh = sc.getConsumerChannel(topic); consumerCh == nil {
-		logger.Errorw("consumers-not-exist", log.Fields{"topic": topic.Name})
+		logger.Errorw(ctx, "consumers-not-exist", log.Fields{"topic": topic.Name})
 		return errors.New("consumers-not-exist")
 	}
 	// For each consumer listening for that topic, start a consumption loop
 	for _, consumer := range consumerCh.consumers {
 		if pConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
-			go sc.consumeFromAPartition(topic, pConsumer, consumerCh)
+			go sc.consumeFromAPartition(ctx, topic, pConsumer, consumerCh)
 		} else if gConsumer, ok := consumer.(*scc.Consumer); ok {
-			go sc.consumeGroupMessages(topic, gConsumer, consumerCh)
+			go sc.consumeGroupMessages(ctx, topic, gConsumer, consumerCh)
 		} else {
-			logger.Errorw("invalid-consumer", log.Fields{"topic": topic})
+			logger.Errorw(ctx, "invalid-consumer", log.Fields{"topic": topic})
 			return errors.New("invalid-consumer")
 		}
 	}
@@ -1017,12 +1027,12 @@
 
 //// setupPartitionConsumerChannel creates a consumerChannels object for that topic and adds it to the consumerChannels map
 //// for that topic.  It also starts the routine that listens for messages on that topic.
-func (sc *SaramaClient) setupPartitionConsumerChannel(topic *Topic, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+func (sc *SaramaClient) setupPartitionConsumerChannel(ctx context.Context, topic *Topic, initialOffset int64) (chan *ic.InterContainerMessage, error) {
 	var pConsumers []sarama.PartitionConsumer
 	var err error
 
-	if pConsumers, err = sc.createPartitionConsumers(topic, initialOffset); err != nil {
-		logger.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+	if pConsumers, err = sc.createPartitionConsumers(ctx, topic, initialOffset); err != nil {
+		logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
 
@@ -1044,8 +1054,8 @@
 
 	//Start a consumer to listen on that specific topic
 	go func() {
-		if err := sc.startConsumers(topic); err != nil {
-			logger.Errorw("start-consumers-failed", log.Fields{
+		if err := sc.startConsumers(ctx, topic); err != nil {
+			logger.Errorw(ctx, "start-consumers-failed", log.Fields{
 				"topic": topic,
 				"error": err})
 		}
@@ -1056,12 +1066,12 @@
 
 // setupGroupConsumerChannel creates a consumerChannels object for that topic and adds it to the consumerChannels map
 // for that topic.  It also starts the routine that listens for messages on that topic.
-func (sc *SaramaClient) setupGroupConsumerChannel(topic *Topic, groupId string, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+func (sc *SaramaClient) setupGroupConsumerChannel(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (chan *ic.InterContainerMessage, error) {
 	// TODO:  Replace this development partition consumer with a group consumer
 	var pConsumer *scc.Consumer
 	var err error
-	if pConsumer, err = sc.createGroupConsumer(topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
-		logger.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+	if pConsumer, err = sc.createGroupConsumer(ctx, topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
+		logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
 	// Create the consumers/channel structure and set the consumers and create a channel on that topic - for now
@@ -1077,8 +1087,8 @@
 
 	//Start a consumer to listen on that specific topic
 	go func() {
-		if err := sc.startConsumers(topic); err != nil {
-			logger.Errorw("start-consumers-failed", log.Fields{
+		if err := sc.startConsumers(ctx, topic); err != nil {
+			logger.Errorw(ctx, "start-consumers-failed", log.Fields{
 				"topic": topic,
 				"error": err})
 		}
@@ -1087,11 +1097,11 @@
 	return consumerListeningChannel, nil
 }
 
-func (sc *SaramaClient) createPartitionConsumers(topic *Topic, initialOffset int64) ([]sarama.PartitionConsumer, error) {
-	logger.Debugw("creating-partition-consumers", log.Fields{"topic": topic.Name})
+func (sc *SaramaClient) createPartitionConsumers(ctx context.Context, topic *Topic, initialOffset int64) ([]sarama.PartitionConsumer, error) {
+	logger.Debugw(ctx, "creating-partition-consumers", log.Fields{"topic": topic.Name})
 	partitionList, err := sc.consumer.Partitions(topic.Name)
 	if err != nil {
-		logger.Warnw("get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+		logger.Warnw(ctx, "get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
 
@@ -1099,7 +1109,7 @@
 	for _, partition := range partitionList {
 		var pConsumer sarama.PartitionConsumer
 		if pConsumer, err = sc.consumer.ConsumePartition(topic.Name, partition, initialOffset); err != nil {
-			logger.Warnw("consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+			logger.Warnw(ctx, "consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
 			return nil, err
 		}
 		pConsumers = append(pConsumers, pConsumer)
@@ -1107,14 +1117,14 @@
 	return pConsumers, nil
 }
 
-func removeChannel(channels []chan *ic.InterContainerMessage, ch <-chan *ic.InterContainerMessage) []chan *ic.InterContainerMessage {
+func removeChannel(ctx context.Context, channels []chan *ic.InterContainerMessage, ch <-chan *ic.InterContainerMessage) []chan *ic.InterContainerMessage {
 	var i int
 	var channel chan *ic.InterContainerMessage
 	for i, channel = range channels {
 		if channel == ch {
 			channels[len(channels)-1], channels[i] = channels[i], channels[len(channels)-1]
 			close(channel)
-			logger.Debug("channel-closed")
+			logger.Debug(ctx, "channel-closed")
 			return channels[:len(channels)-1]
 		}
 	}
@@ -1129,14 +1139,14 @@
 	}
 }
 
-func (sc *SaramaClient) deleteFromGroupConsumers(topic string) error {
+func (sc *SaramaClient) deleteFromGroupConsumers(ctx context.Context, topic string) error {
 	sc.lockOfGroupConsumers.Lock()
 	defer sc.lockOfGroupConsumers.Unlock()
 	if _, exist := sc.groupConsumers[topic]; exist {
 		consumer := sc.groupConsumers[topic]
 		delete(sc.groupConsumers, topic)
 		if err := consumer.Close(); err != nil {
-			logger.Errorw("failure-closing-consumer", log.Fields{"error": err})
+			logger.Errorw(ctx, "failure-closing-consumer", log.Fields{"error": err})
 			return err
 		}
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/utils.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/utils.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/kafka/utils.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/kafka/utils.go
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/common.go
similarity index 81%
copy from vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
copy to vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/common.go
index 1cf2e1c..b0ce81b 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/db/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/common.go
@@ -13,18 +13,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package db
+package log
 
-import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-)
-
-var logger log.Logger
+var logger CLogger
 
 func init() {
 	// Setup this package so that its log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "db"})
+	logger, err = RegisterPackage(JSON, ErrorLevel, Fields{})
 	if err != nil {
 		panic(err)
 	}
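
Any package adopting the v4 logger follows the same shape as this common.go. A sketch for a hypothetical consuming package:

package mypkg

import "github.com/opencord/voltha-lib-go/v4/pkg/log"

var logger log.CLogger

func init() {
	// Register this package with its own logger; the level set here is a
	// default and is typically overridden at component startup.
	var err error
	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mypkg"})
	if err != nil {
		panic(err)
	}
}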
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/log.go
similarity index 81%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/log.go
index 1e23da1..b8d498c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/log/log.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/log.go
@@ -14,29 +14,39 @@
  * limitations under the License.
  */
 
-//Package log provides a structured Logger interface implemented using zap logger. It provides the following capabilities:
-//1. Package level logging - a go package can register itself (AddPackage) and have a logger created for that package.
-//2. Dynamic log level change - for all registered packages (SetAllLogLevel)
-//3. Dynamic log level change - for a given package (SetPackageLogLevel)
-//4. Provides a default logger for unregistered packages
-//5. Allow key-value pairs to be added to a logger(UpdateLogger) or all loggers (UpdateAllLoggers) at run time
-//6. Add to the log output the location where the log was invoked (filename.functionname.linenumber)
+// Package log provides a structured Logger interface implemented using zap logger. It provides the following capabilities:
+// 1. Package level logging - a go package can register itself (AddPackage) and have a logger created for that package.
+// 2. Dynamic log level change - for all registered packages (SetAllLogLevel)
+// 3. Dynamic log level change - for a given package (SetPackageLogLevel)
+// 4. Provides a default logger for unregistered packages (though its use should be avoided)
+// 5. Allow key-value pairs to be added to a logger(UpdateLogger) or all loggers (UpdateAllLoggers) at run time
+// 6. Add to the log output the location where the log was invoked (filename.functionname.linenumber)
 //
 // Using package-level logging (recommended approach).  In the examples below, log refers to this log package.
-// 1.  In the appropriate package add the following in the init section of the package.  The log level can be changed
-// and any number of default fields can be added as well. The log level specifies the lowest log level that will be
-// in the output while the fields will be automatically added to all log printouts.
 //
-//	log.AddPackage(mylog.JSON, log.WarnLevel, log.Fields{"anyFieldName": "any value"})
+// 1. In the appropriate package, add the following in the init section of the package (usually in a common.go file).
+//    The log level can be changed and any number of default fields can be added as well. The log level specifies
+//    the lowest log level that will appear in the output while the fields will be automatically added to all log printouts.
+//    However, as voltha components re-initialize the log level of each registered package to the default initial log level
+//    passed as a CLI argument, the log level passed in the RegisterPackage call effectively has no effect.
 //
-//2. In the calling package, just invoke any of the publicly available functions of the logger.  Here is an  example
-// to write an Info log with additional fields:
+//    var logger log.CLogger
+//    func init() {
+//              logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"key1": "value1"})
+//    }
 //
-//log.Infow("An example", mylog.Fields{"myStringOutput": "output", "myIntOutput": 2})
+// 2. In the calling package, use any of the publicly available functions of the local package-level logger instance
+//    created in the previous step.  Here is an example to write an Info log with additional fields:
 //
-//3. To dynamically change the log level, you can use 1)SetLogLevel from inside your package or 2) SetPackageLogLevel
-// from anywhere or 3)  SetAllLogLevel from anywhere.
+//    logger.Infow("An example", mylog.Fields{"myStringOutput": "output", "myIntOutput": 2})
 //
+// 3. To dynamically change the log level, you can use
+//          a) SetLogLevel from inside your package or
+//          b) SetPackageLogLevel from anywhere or
+//          c) SetAllLogLevel from anywhere.
+//
+//    The dynamic log level configuration feature also uses the SetPackageLogLevel method, based on triggers
+//    received due to changes to the configured log levels
 
 package log
 
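To make point 3 above concrete, a one-line sketch of changing a single registered package's verbosity at run time (the import path is a placeholder):

	log.SetPackageLogLevel("github.com/opencord/myadapter/internal/core", log.DebugLevel)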
@@ -510,134 +520,134 @@
 
 // Debug logs a message at level Debug on the standard logger.
 func (l clogger) Debug(ctx context.Context, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Debug(args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debug(args...)
 }
 
 // Debugln logs a message at level Debug on the standard logger with a line feed. Default in any case.
 func (l clogger) Debugln(ctx context.Context, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Debug(args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debug(args...)
 }
 
 // Debugf logs a message at level Debug on the standard logger.
 func (l clogger) Debugf(ctx context.Context, format string, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Debugf(format, args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debugf(format, args...)
 }
 
 // Debugw logs a message with some additional context. The variadic key-value
 // pairs are treated as they are in With.
 func (l clogger) Debugw(ctx context.Context, msg string, keysAndValues Fields) {
 	if l.V(DebugLevel) {
-		l.log.With(ExtractContextAttributes(ctx)...).Debugw(msg, serializeMap(keysAndValues)...)
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debugw(msg, serializeMap(keysAndValues)...)
 	}
 }
 
 // Info logs a message at level Info on the standard logger.
 func (l clogger) Info(ctx context.Context, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Info(args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Info(args...)
 }
 
 // Infoln logs a message at level Info on the standard logger with a line feed. Default in any case.
 func (l clogger) Infoln(ctx context.Context, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Info(args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Info(args...)
 	//msg := fmt.Sprintln(args...)
 	//l.sourced().Info(msg[:len(msg)-1])
 }
 
 // Infof logs a message at level Info on the standard logger.
 func (l clogger) Infof(ctx context.Context, format string, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Infof(format, args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Infof(format, args...)
 }
 
 // Infow logs a message with some additional context. The variadic key-value
 // pairs are treated as they are in With.
 func (l clogger) Infow(ctx context.Context, msg string, keysAndValues Fields) {
 	if l.V(InfoLevel) {
-		l.log.With(ExtractContextAttributes(ctx)...).Infow(msg, serializeMap(keysAndValues)...)
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Infow(msg, serializeMap(keysAndValues)...)
 	}
 }
 
 // Warn logs a message at level Warn on the standard logger.
 func (l clogger) Warn(ctx context.Context, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Warn(args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
 }
 
 // Warnln logs a message at level Warn on the standard logger with a line feed. Default in any case.
 func (l clogger) Warnln(ctx context.Context, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Warn(args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
 }
 
 // Warnf logs a message at level Warn on the standard logger.
 func (l clogger) Warnf(ctx context.Context, format string, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Warnf(format, args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnf(format, args...)
 }
 
 // Warnw logs a message with some additional context. The variadic key-value
 // pairs are treated as they are in With.
 func (l clogger) Warnw(ctx context.Context, msg string, keysAndValues Fields) {
 	if l.V(WarnLevel) {
-		l.log.With(ExtractContextAttributes(ctx)...).Warnw(msg, serializeMap(keysAndValues)...)
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnw(msg, serializeMap(keysAndValues)...)
 	}
 }
 
 // Error logs a message at level Error on the standard logger.
 func (l clogger) Error(ctx context.Context, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Error(args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Error(args...)
 }
 
 // Errorln logs a message at level Error on the standard logger with a line feed. Default in any case.
 func (l clogger) Errorln(ctx context.Context, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Error(args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Error(args...)
 }
 
 // Errorf logs a message at level Error on the standard logger.
 func (l clogger) Errorf(ctx context.Context, format string, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Errorf(format, args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Errorf(format, args...)
 }
 
 // Errorw logs a message with some additional context. The variadic key-value
 // pairs are treated as they are in With.
 func (l clogger) Errorw(ctx context.Context, msg string, keysAndValues Fields) {
 	if l.V(ErrorLevel) {
-		l.log.With(ExtractContextAttributes(ctx)...).Errorw(msg, serializeMap(keysAndValues)...)
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Errorw(msg, serializeMap(keysAndValues)...)
 	}
 }
 
 // Fatal logs a message at level Fatal on the standard logger.
 func (l clogger) Fatal(ctx context.Context, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Fatal(args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatal(args...)
 }
 
 // Fatalln logs a message at level Fatal on the standard logger with a line feed. Default in any case.
 func (l clogger) Fatalln(ctx context.Context, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Fatal(args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatal(args...)
 }
 
 // Fatalf logs a message at level Fatal on the standard logger.
 func (l clogger) Fatalf(ctx context.Context, format string, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Fatalf(format, args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatalf(format, args...)
 }
 
 // Fatalw logs a message with some additional context. The variadic key-value
 // pairs are treated as they are in With.
 func (l clogger) Fatalw(ctx context.Context, msg string, keysAndValues Fields) {
 	if l.V(FatalLevel) {
-		l.log.With(ExtractContextAttributes(ctx)...).Fatalw(msg, serializeMap(keysAndValues)...)
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatalw(msg, serializeMap(keysAndValues)...)
 	}
 }
 
 // Warning logs a message at level Warn on the standard logger.
 func (l clogger) Warning(ctx context.Context, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Warn(args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
 }
 
 // Warningln logs a message at level Warn on the standard logger with a line feed. Default in any case.
 func (l clogger) Warningln(ctx context.Context, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Warn(args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
 }
 
 // Warningf logs a message at level Warn on the standard logger.
 func (l clogger) Warningf(ctx context.Context, format string, args ...interface{}) {
-	l.log.With(ExtractContextAttributes(ctx)...).Warnf(format, args...)
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnf(format, args...)
 }
 
 // V reports whether verbosity level l is at least the requested verbose level.
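
The upshot of this hunk: every log call is context-aware, and correlation fields (e.g. from an active span) are appended via the global LogFeaturesManager rather than a package-level helper. Caller-side this is simply:

	// Fields extracted from ctx are appended automatically when log
	// correlation is enabled; otherwise the call behaves as before.
	logger.Infow(ctx, "request-received", log.Fields{"device-id": "onu-1"})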
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/utils.go
new file mode 100644
index 0000000..82c3d7d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/log/utils.go
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// File contains utility functions to support Open Tracing in conjunction with
+// Enhanced Logging based on context propagation
+
+package log
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/opentracing/opentracing-go"
+	jtracing "github.com/uber/jaeger-client-go"
+	jcfg "github.com/uber/jaeger-client-go/config"
+	"io"
+	"os"
+	"strings"
+	"sync"
+)
+
+const (
+	RootSpanNameKey = "op-name"
+)
+
+// Global Settings governing the Log Correlation and Tracing features. Should only
+// be updated through the exposed public methods
+type LogFeaturesManager struct {
+	isTracePublishingEnabled bool
+	isLogCorrelationEnabled  bool
+	componentName            string // Name of component extracted from ENV variable
+	activeTraceAgentAddress  string
+	lock                     sync.Mutex
+}
+
+var globalLFM *LogFeaturesManager = &LogFeaturesManager{}
+
+func GetGlobalLFM() *LogFeaturesManager {
+	return globalLFM
+}
+
+// A wrapper to utilize the currently active Tracer instance. The middleware library used for generating
+// Spans for gRPC API calls does not support dynamically setting the active Tracer, unlike the SetGlobalTracer
+// method provided by the OpenTracing API
+type ActiveTracerProxy struct {
+}
+
+func (atw ActiveTracerProxy) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span {
+	return opentracing.GlobalTracer().StartSpan(operationName, opts...)
+}
+
+func (atw ActiveTracerProxy) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error {
+	return opentracing.GlobalTracer().Inject(sm, format, carrier)
+}
+
+func (atw ActiveTracerProxy) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
+	return opentracing.GlobalTracer().Extract(format, carrier)
+}
+
+// Jaeger compliant Logger instance to redirect logs to the Default Logger
+type traceLogger struct {
+	logger *clogger
+}
+
+func (tl traceLogger) Error(msg string) {
+	tl.logger.Error(context.Background(), msg)
+}
+
+func (tl traceLogger) Infof(msg string, args ...interface{}) {
+	// Tracing logs should be performed only at Debug Verbosity
+	tl.logger.Debugf(context.Background(), msg, args...)
+}
+
+// Wrapper to handle correct Closer call at the time of Process Termination
+type traceCloser struct {
+}
+
+func (c traceCloser) Close() error {
+	currentActiveTracer := opentracing.GlobalTracer()
+	if currentActiveTracer != nil {
+		if jTracer, ok := currentActiveTracer.(*jtracing.Tracer); ok {
+			jTracer.Close()
+		}
+	}
+
+	return nil
+}
+
+// Method to initialize the Jaeger based Tracing client, based on the initial status of Trace Publishing and Log Correlation
+func (lfm *LogFeaturesManager) InitTracingAndLogCorrelation(tracePublishEnabled bool, traceAgentAddress string, logCorrelationEnabled bool) (io.Closer, error) {
+	lfm.componentName = os.Getenv("COMPONENT_NAME")
+	if lfm.componentName == "" {
+		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+	}
+
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	// Use NoopTracer when both Tracing Publishing and Log Correlation are disabled
+	if !tracePublishEnabled && !logCorrelationEnabled {
+		logger.Info(context.Background(), "Skipping Global Tracer initialization as both Trace publish and Log correlation are configured as disabled")
+		lfm.isTracePublishingEnabled = false
+		lfm.isLogCorrelationEnabled = false
+		opentracing.SetGlobalTracer(opentracing.NoopTracer{})
+		return traceCloser{}, nil
+	}
+
+	tracer, _, err := lfm.constructJaegerTracer(tracePublishEnabled, traceAgentAddress, true)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize variables representing Active Status
+	opentracing.SetGlobalTracer(tracer)
+	lfm.isTracePublishingEnabled = tracePublishEnabled
+	lfm.activeTraceAgentAddress = traceAgentAddress
+	lfm.isLogCorrelationEnabled = logCorrelationEnabled
+	return traceCloser{}, nil
+}
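+
+// Example (illustrative sketch, not part of the original library): typical startup wiring,
+// assuming COMPONENT_NAME is set in the environment and a reachable agent address:
+//
+//	closer, err := log.GetGlobalLFM().InitTracingAndLogCorrelation(true, "jaeger-agent:6831", true)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer log.TerminateTracing(closer)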
+
+// Method to replace the Active Tracer and gracefully close the previous tracer
+func (lfm *LogFeaturesManager) replaceActiveTracer(tracer opentracing.Tracer) {
+	currentActiveTracer := opentracing.GlobalTracer()
+	opentracing.SetGlobalTracer(tracer)
+
+	if currentActiveTracer != nil {
+		if jTracer, ok := currentActiveTracer.(*jtracing.Tracer); ok {
+			jTracer.Close()
+		}
+	}
+}
+
+func (lfm *LogFeaturesManager) GetLogCorrelationStatus() bool {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	return lfm.isLogCorrelationEnabled
+}
+
+func (lfm *LogFeaturesManager) SetLogCorrelationStatus(isEnabled bool) {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	if isEnabled == lfm.isLogCorrelationEnabled {
+		logger.Debugf(context.Background(), "Ignoring Log Correlation Set operation with value %t; current Status same as desired", isEnabled)
+		return
+	}
+
+	if isEnabled {
+		// Construct new Tracer instance if Log Correlation has been enabled and current active tracer is a NoopTracer instance.
+		// Continue using the earlier tracer instance in case of any error
+		if _, ok := opentracing.GlobalTracer().(opentracing.NoopTracer); ok {
+			tracer, _, err := lfm.constructJaegerTracer(lfm.isTracePublishingEnabled, lfm.activeTraceAgentAddress, false)
+			if err != nil {
+				logger.Warnf(context.Background(), "Log Correlation Enable operation failed with error: %s", err.Error())
+				return
+			}
+
+			lfm.replaceActiveTracer(tracer)
+		}
+
+		lfm.isLogCorrelationEnabled = true
+		logger.Info(context.Background(), "Log Correlation has been enabled")
+
+	} else {
+		// Switch to NoopTracer when Log Correlation has been disabled and Tracing Publish is already disabled
+		if _, ok := opentracing.GlobalTracer().(opentracing.NoopTracer); !ok && !lfm.isTracePublishingEnabled {
+			lfm.replaceActiveTracer(opentracing.NoopTracer{})
+		}
+
+		lfm.isLogCorrelationEnabled = false
+		logger.Info(context.Background(), "Log Correlation has been disabled")
+	}
+}
+
+func (lfm *LogFeaturesManager) GetTracePublishingStatus() bool {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	return lfm.isTracePublishingEnabled
+}
+
+func (lfm *LogFeaturesManager) SetTracePublishingStatus(isEnabled bool) {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	if isEnabled == lfm.isTracePublishingEnabled {
+		logger.Debugf(context.Background(), "Ignoring Trace Publishing Set operation with value %t; current Status same as desired", isEnabled)
+		return
+	}
+
+	if isEnabled {
+		// Construct a new Tracer instance if Trace Publishing has been enabled (even if a Jaeger instance is already active).
+		// This is needed to ensure that a fresh lookup of the Jaeger Agent address is performed again when Tracing
+		// goes through a Disable-Enable cycle
+		tracer, _, err := lfm.constructJaegerTracer(isEnabled, lfm.activeTraceAgentAddress, false)
+		if err != nil {
+			logger.Warnf(context.Background(), "Trace Publishing Enable operation failed with error: %s", err.Error())
+			return
+		}
+		lfm.replaceActiveTracer(tracer)
+
+		lfm.isTracePublishingEnabled = true
+		logger.Info(context.Background(), "Tracing Publishing has been enabled")
+	} else {
+		// Switch to NoopTracer when Tracing Publish has been disabled and Log Correlation is already disabled
+		if !lfm.isLogCorrelationEnabled {
+			lfm.replaceActiveTracer(opentracing.NoopTracer{})
+		} else {
+			// Else construct a new Jaeger Instance with publishing disabled
+			tracer, _, err := lfm.constructJaegerTracer(isEnabled, lfm.activeTraceAgentAddress, false)
+			if err != nil {
+				logger.Warnf(context.Background(), "Trace Publishing Disable operation failed with error: %s", err.Error())
+				return
+			}
+			lfm.replaceActiveTracer(tracer)
+		}
+
+		lfm.isTracePublishingEnabled = false
+		logger.Info(context.Background(), "Tracing Publishing has been disabled")
+	}
+}
+
+// Method to construct a new Jaeger Tracer instance based on the given Trace Agent address and Publish status.
+// The last attribute indicates whether to fall back to the Loopback IP for creating the Jaeger Client when the
+// DNS lookup of the supplied Trace Agent address has failed. It is fine to fall back during the initialization
+// step, but not later (when enabling/disabling the status dynamically)
+func (lfm *LogFeaturesManager) constructJaegerTracer(tracePublishEnabled bool, traceAgentAddress string, fallbackToLoopbackAllowed bool) (opentracing.Tracer, io.Closer, error) {
+	cfg := jcfg.Configuration{ServiceName: lfm.componentName}
+
+	var err error
+	var jReporterConfig jcfg.ReporterConfig
+	var jReporterCfgOption jtracing.Reporter
+
+	logger.Info(context.Background(), "Constructing new Jaeger Tracer instance")
+
+	// Attempt the Trace Agent Address first; will fall back to the Loopback IP if it fails
+	jReporterConfig = jcfg.ReporterConfig{LocalAgentHostPort: traceAgentAddress, LogSpans: true}
+	jReporterCfgOption, err = jReporterConfig.NewReporter(lfm.componentName, jtracing.NewNullMetrics(), traceLogger{logger: logger.(*clogger)})
+
+	if err != nil {
+		if !fallbackToLoopbackAllowed {
+			return nil, nil, errors.New("Reporter Creation for given Trace Agent address " + traceAgentAddress + " failed with error : " + err.Error())
+		}
+
+		logger.Infow(context.Background(), "Unable to create Reporter with given Trace Agent address",
+			Fields{"error": err, "address": traceAgentAddress})
+		// The Reporter initialization may fail due to an Invalid Agent address or a non-existent Agent (DNS lookup failure).
+		// It is essential for the Tracer instance to still start for the correct Span propagation needed for log correlation.
+		// Thus, fall back to the loopback IP for Reporter initialization before returning any error
+		tracePublishEnabled = false
+
+		jReporterConfig.LocalAgentHostPort = "127.0.0.1:6831"
+		jReporterCfgOption, err = jReporterConfig.NewReporter(lfm.componentName, jtracing.NewNullMetrics(), traceLogger{logger: logger.(*clogger)})
+		if err != nil {
+			return nil, nil, errors.New("Failed to initialize Jaeger Tracing due to Reporter creation error : " + err.Error())
+		}
+	}
+
+	// To start with, we are using Constant Sampling type
+	samplerParam := 0 // 0: Do not publish span, 1: Publish
+	if tracePublishEnabled {
+		samplerParam = 1
+	}
+	jSamplerConfig := jcfg.SamplerConfig{Type: "const", Param: float64(samplerParam)}
+	jSamplerCfgOption, err := jSamplerConfig.NewSampler(lfm.componentName, jtracing.NewNullMetrics())
+	if err != nil {
+		return nil, nil, errors.New("Unable to create Sampler : " + err.Error())
+	}
+
+	return cfg.NewTracer(jcfg.Reporter(jReporterCfgOption), jcfg.Sampler(jSamplerCfgOption))
+}
+
+func TerminateTracing(c io.Closer) {
+	err := c.Close()
+	if err != nil {
+		logger.Error(context.Background(), "error-while-closing-jaeger-tracer", Fields{"err": err})
+	}
+}
+
+// Extracts details of the Execution Context as log fields from the Tracing Span injected into the
+// context instance. The following log fields are extracted:
+// 1. Operation Name : key as 'op-name' and value as the Span operation name
+// 2. Operation Id : key as 'op-id' and value as the 64-bit Span Id in hex digits string
+//
+// Additionally, any tags present in the Span are also extracted to use as log fields, e.g. device-id.
+//
+// If no Span is associated with the context, an empty slice is returned without any log fields
+func (lfm *LogFeaturesManager) ExtractContextAttributes(ctx context.Context) []interface{} {
+	if !lfm.isLogCorrelationEnabled {
+		return make([]interface{}, 0)
+	}
+
+	attrMap := make(map[string]interface{})
+
+	if ctx != nil {
+		if span := opentracing.SpanFromContext(ctx); span != nil {
+			if jspan, ok := span.(*jtracing.Span); ok {
+				// Add Log fields for operation identified by Root Level Span (Trace)
+				opId := fmt.Sprintf("%016x", jspan.SpanContext().TraceID().Low) // Using Sprintf to avoid removal of leading 0s
+				opName := jspan.BaggageItem(RootSpanNameKey)
+
+				taskId := fmt.Sprintf("%016x", uint64(jspan.SpanContext().SpanID())) // Using Sprintf to avoid removal of leading 0s
+				taskName := jspan.OperationName()
+
+				if opName == "" {
+					span.SetBaggageItem(RootSpanNameKey, taskName)
+					opName = taskName
+				}
+
+				attrMap["op-id"] = opId
+				attrMap["op-name"] = opName
+
+				// Add Log fields for the task identified by the Current Span, if it is different
+				// from the operation
+				if taskId != opId {
+					attrMap["task-id"] = taskId
+					attrMap["task-name"] = taskName
+				}
+
+				for k, v := range jspan.Tags() {
+					// Ignore the special tags added by Jaeger and middleware (sampler.*, span.*, component) present in the span
+					if strings.HasPrefix(k, "sampler.") || strings.HasPrefix(k, "span.") || k == "component" {
+						continue
+					}
+
+					attrMap[k] = v
+				}
+
+				processBaggageItems := func(k, v string) bool {
+					if k != "rpc-span-name" {
+						attrMap[k] = v
+					}
+					return true
+				}
+
+				jspan.SpanContext().ForeachBaggageItem(processBaggageItems)
+			}
+		}
+	}
+
+	return serializeMap(attrMap)
+}
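+
+// Example (illustrative sketch): the returned slice alternates keys and values, so it can be
+// passed straight to zap SugaredLogger-style variadic field lists (sugar is an assumed logger):
+//
+//	fields := log.GetGlobalLFM().ExtractContextAttributes(ctx)
+//	sugar.With(fields...).Info("processing-request")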
+
+// Method to inject additional log fields into Span e.g. device-id
+func EnrichSpan(ctx context.Context, keyAndValues ...Fields) {
+	span := opentracing.SpanFromContext(ctx)
+	if span != nil {
+		if jspan, ok := span.(*jtracing.Span); ok {
+			// Inject as a BaggageItem when the Span is the Root Span so that it propagates
+			// across the components along with the Root Span (called a Trace).
+			// Otherwise, inject as a Tag so that it is attached to the Child Task
+			isRootSpan := false
+			if jspan.SpanContext().TraceID().String() == jspan.SpanContext().SpanID().String() {
+				isRootSpan = true
+			}
+
+			for _, field := range keyAndValues {
+				for k, v := range field {
+					if isRootSpan {
+						span.SetBaggageItem(k, v.(string))
+					} else {
+						span.SetTag(k, v)
+					}
+				}
+			}
+		}
+	}
+}
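+
+// Example (illustrative sketch): values injected on a root span become baggage items and must
+// therefore be strings, or the type assertion above panics:
+//
+//	log.EnrichSpan(ctx, log.Fields{"device-id": deviceID}) // deviceID is an assumed string variable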
+
+// Method to inject an Error into the Span in the event of any operation failure
+func MarkSpanError(ctx context.Context, err error) {
+	span := opentracing.SpanFromContext(ctx)
+	if span != nil {
+		span.SetTag("error", true)
+		span.SetTag("err", err)
+	}
+}
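+
+// Example (illustrative sketch; doOperation is a placeholder):
+//
+//	if err := doOperation(ctx); err != nil {
+//		log.MarkSpanError(ctx, err)
+//		return err
+//	}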
+
+// Creates a Child Span from the Parent Span embedded in the passed context. Should be used before starting a new major
+// operation in Synchronous or Asynchronous mode (go routine), such as the following:
+// 1. At the start of all implemented External API methods, unless using an interceptor for auto-injection of the Span (Server side impl)
+// 2. Just before calling a Third-Party lib which invokes an External API (etcd, kafka)
+// 3. At the start of a Go Routine responsible for performing a major task involving significant duration
+// 4. Any method which is suspected to be time-consuming...
+func CreateChildSpan(ctx context.Context, taskName string, keyAndValues ...Fields) (opentracing.Span, context.Context) {
+	if !GetGlobalLFM().GetLogCorrelationStatus() && !GetGlobalLFM().GetTracePublishingStatus() {
+		return opentracing.NoopTracer{}.StartSpan(taskName), ctx
+	}
+
+	parentSpan := opentracing.SpanFromContext(ctx)
+	childSpan, newCtx := opentracing.StartSpanFromContext(ctx, taskName)
+
+	if parentSpan == nil || parentSpan.BaggageItem(RootSpanNameKey) == "" {
+		childSpan.SetBaggageItem(RootSpanNameKey, taskName)
+	}
+
+	EnrichSpan(newCtx, keyAndValues...)
+	return childSpan, newCtx
+}
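+
+// Example (illustrative sketch; deviceID is a placeholder):
+//
+//	span, ctx := log.CreateChildSpan(ctx, "device-activation", log.Fields{"device-id": deviceID})
+//	defer span.Finish()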
+
+// Creates an Async Child Span with a Follows-From relationship from the Parent Span embedded in the passed context.
+// Should be used only in scenarios when
+// a) There is a discontinuation in execution, and thus the result of the Child Span does not affect the Parent flow at all
+// b) The execution of the Child Span is guaranteed to start after the completion of the Parent Span
+// In case of any confusion, use the CreateChildSpan method.
+// Some situations where this method would be suitable include a Kafka Async RPC call, propagation of an Event across
+// a channel, etc.
+func CreateAsyncSpan(ctx context.Context, taskName string, keyAndValues ...Fields) (opentracing.Span, context.Context) {
+	if !GetGlobalLFM().GetLogCorrelationStatus() && !GetGlobalLFM().GetTracePublishingStatus() {
+		return opentracing.NoopTracer{}.StartSpan(taskName), ctx
+	}
+
+	var asyncSpan opentracing.Span
+	var newCtx context.Context
+
+	parentSpan := opentracing.SpanFromContext(ctx)
+
+	// We should always be creating an Async span from a Valid parent span. If not, create a Child span instead
+	if parentSpan == nil {
+		logger.Warn(context.Background(), "Async span must be created with a Valid parent span only")
+		asyncSpan, newCtx = opentracing.StartSpanFromContext(ctx, taskName)
+	} else {
+		// Use the Background context as the base for the Follows-From case; otherwise the new span would get both ChildOf and FollowsFrom relationships
+		asyncSpan, newCtx = opentracing.StartSpanFromContext(context.Background(), taskName, opentracing.FollowsFrom(parentSpan.Context()))
+	}
+
+	if parentSpan == nil || parentSpan.BaggageItem(RootSpanNameKey) == "" {
+		asyncSpan.SetBaggageItem(RootSpanNameKey, taskName)
+	}
+
+	EnrichSpan(newCtx, keyAndValues...)
+	return asyncSpan, newCtx
+}
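+
+// Example (illustrative sketch; publish and event are placeholders): handing work off to a goroutine:
+//
+//	span, asyncCtx := log.CreateAsyncSpan(ctx, "publish-event")
+//	go func() {
+//		defer span.Finish()
+//		publish(asyncCtx, event)
+//	}()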
+
+// Extracts the Span from the Source context and injects it into the supplied Target context.
+// This should be used in situations where we are calling a time-sensitive operation (e.g. an etcd update) and hence
+// used context.Background() earlier to avoid any cancellation/timeout of the operation by the passed context.
+// This allows propagation of the Span with a different base context (and not the original context)
+func WithSpanFromContext(targetCtx, sourceCtx context.Context) context.Context {
+	span := opentracing.SpanFromContext(sourceCtx)
+	return opentracing.ContextWithSpan(targetCtx, span)
+}
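+
+// Example (illustrative sketch; kvBackend, key and value are placeholders): keep the span while
+// detaching from the caller's cancellation:
+//
+//	kvCtx, cancel := context.WithTimeout(log.WithSpanFromContext(context.Background(), ctx), 5*time.Second)
+//	defer cancel()
+//	err := kvBackend.Put(kvCtx, key, value)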
+
+// Utility method to convert log Fields into array of interfaces expected by zap logger methods
+func serializeMap(fields Fields) []interface{} {
+	data := make([]interface{}, len(fields)*2)
+	i := 0
+	for k, v := range fields {
+		data[i] = k
+		data[i+1] = v
+		i = i + 2
+	}
+	return data
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/common.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/common.go
similarity index 83%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/common.go
index 211419d..d9739af 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/common.go
@@ -16,15 +16,15 @@
 package probe
 
 import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Set up this package so that its log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "probe"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/probe.go
similarity index 91%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/probe.go
index e89d5bc..f13f257 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/probe/probe.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/probe/probe.go
@@ -18,7 +18,7 @@
 import (
 	"context"
 	"fmt"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 	"net/http"
 	"sync"
 )
@@ -109,7 +109,7 @@
 }
 
 // RegisterService registers one or more service names with the probe; status will be tracked against the service name
-func (p *Probe) RegisterService(names ...string) {
+func (p *Probe) RegisterService(ctx context.Context, names ...string) {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 	if p.status == nil {
@@ -118,7 +118,7 @@
 	for _, name := range names {
 		if _, ok := p.status[name]; !ok {
 			p.status[name] = ServiceStatusUnknown
-			logger.Debugw("probe-service-registered", log.Fields{"service-name": name})
+			logger.Debugw(ctx, "probe-service-registered", log.Fields{"service-name": name})
 		}
 	}
 
@@ -136,7 +136,7 @@
 }
 
 // UpdateStatus utility function to send a service update to the probe
-func (p *Probe) UpdateStatus(name string, status ServiceStatus) {
+func (p *Probe) UpdateStatus(ctx context.Context, name string, status ServiceStatus) {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 	if p.status == nil {
@@ -161,7 +161,7 @@
 	} else {
 		p.isHealthy = defaultHealthFunc(p.status)
 	}
-	logger.Debugw("probe-service-status-updated",
+	logger.Debugw(ctx, "probe-service-status-updated",
 		log.Fields{
 			"service-name": name,
 			"status":       status.String(),
@@ -204,7 +204,7 @@
 func UpdateStatusFromContext(ctx context.Context, name string, status ServiceStatus) {
 	p := GetProbeFromContext(ctx)
 	if p != nil {
-		p.UpdateStatus(name, status)
+		p.UpdateStatus(ctx, name, status)
 	}
 }
 
@@ -228,25 +228,26 @@
 	}
 }
 func (p *Probe) detailzFunc(w http.ResponseWriter, req *http.Request) {
+	ctx := context.Background()
 	p.mutex.RLock()
 	defer p.mutex.RUnlock()
 	w.Header().Set("Content-Type", "application/json")
 	if _, err := w.Write([]byte("{")); err != nil {
-		logger.Errorw("write-response", log.Fields{"error": err})
+		logger.Errorw(ctx, "write-response", log.Fields{"error": err})
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
 	comma := ""
 	for c, s := range p.status {
 		if _, err := w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String()))); err != nil {
-			logger.Errorw("write-response", log.Fields{"error": err})
+			logger.Errorw(ctx, "write-response", log.Fields{"error": err})
 			w.WriteHeader(http.StatusInternalServerError)
 			return
 		}
 		comma = ", "
 	}
 	if _, err := w.Write([]byte("}")); err != nil {
-		logger.Errorw("write-response", log.Fields{"error": err})
+		logger.Errorw(ctx, "write-response", log.Fields{"error": err})
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
@@ -254,7 +255,7 @@
 }
 
 // ListenAndServe implements 3 HTTP endpoints on the given port for healthz, readz, and detailz. Returns only on error
-func (p *Probe) ListenAndServe(address string) {
+func (p *Probe) ListenAndServe(ctx context.Context, address string) {
 	mux := http.NewServeMux()
 
 	// Returns the result of the readyFunc calculation
@@ -269,7 +270,7 @@
 		Addr:    address,
 		Handler: mux,
 	}
-	logger.Fatal(s.ListenAndServe())
+	logger.Fatal(ctx, s.ListenAndServe())
 }
 
 func (p *Probe) IsReady() bool {
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/4QueueHybridProfileMap1.json b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/4QueueHybridProfileMap1.json
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/4QueueHybridProfileMap1.json
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/4QueueHybridProfileMap1.json
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/README.md b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/README.md
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/README.md
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/README.md
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/SingleQueueEponProfile.json b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/SingleQueueEponProfile.json
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/SingleQueueEponProfile.json
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/SingleQueueEponProfile.json
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/common.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/common.go
similarity index 82%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/common.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/common.go
index 42818f1..544c780 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/common.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/common.go
@@ -16,15 +16,15 @@
 package techprofile
 
 import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 )
 
-var logger log.Logger
+var logger log.CLogger
 
 func init() {
 	// Set up this package so that its log level can be modified at run time
 	var err error
-	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "techprofile"})
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
 	if err != nil {
 		panic(err)
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/config.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/config.go
similarity index 69%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/config.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/config.go
index fa2a6de..438ea4a 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/config.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/config.go
@@ -16,7 +16,8 @@
 package techprofile
 
 import (
-	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+	"fmt"
+	"github.com/opencord/voltha-lib-go/v4/pkg/db"
 	"time"
 )
 
@@ -31,8 +32,12 @@
 
 	defaultKVStoreTimeout = 5 * time.Second //in seconds
 
-	// Tech profile path prefix in kv store
-	defaultKVPathPrefix = "service/voltha/technology_profiles"
+	// Tech profile path prefix in kv store (for the TP template)
+	// NOTE that this is hardcoded to service/voltha as the TP template is shared across stacks
+	defaultTpKvPathPrefix = "service/voltha/technology_profiles"
+
+	// Tech profile path prefix in kv store (for TP instances)
+	defaultKVPathPrefix = "%s/technology_profiles"
 
 	// Tech profile path in kv store
 	defaultTechProfileKVPath = "%s/%d" // <technology>/<tech_profile_tableID>
@@ -93,38 +98,41 @@
 
 // TechprofileFlags represents the set of configurations used
 type TechProfileFlags struct {
-	KVStoreAddress       string
-	KVStoreType          string
-	KVStoreTimeout       time.Duration
-	KVBackend            *db.Backend
-	TPKVPathPrefix       string
-	TPFileKVPath         string
-	TPInstanceKVPath     string
-	DefaultTPName        string
-	TPVersion            int
-	NumGemPorts          uint32
-	DefaultPbits         []string
-	LogLevel             int
-	DefaultTechProfileID uint32
-	DefaultNumGemPorts   uint32
+	KVStoreAddress        string
+	KVStoreType           string
+	KVStoreTimeout        time.Duration
+	KVBackend             *db.Backend // this is the backend used to store TP instances
+	DefaultTpKVBackend    *db.Backend // this is the backend used to read the TP profile
+	TPKVPathPrefix        string
+	defaultTpKvPathPrefix string
+	TPFileKVPath          string
+	TPInstanceKVPath      string
+	DefaultTPName         string
+	TPVersion             int
+	NumGemPorts           uint32
+	DefaultPbits          []string
+	LogLevel              int
+	DefaultTechProfileID  uint32
+	DefaultNumGemPorts    uint32
 }
 
-func NewTechProfileFlags(KVStoreType string, KVStoreAddress string) *TechProfileFlags {
+func NewTechProfileFlags(KVStoreType string, KVStoreAddress string, basePathKvStore string) *TechProfileFlags {
 	// initialize with default values
 	var techProfileFlags = TechProfileFlags{
-		KVBackend:            nil,
-		KVStoreAddress:       KVStoreAddress,
-		KVStoreType:          KVStoreType,
-		KVStoreTimeout:       defaultKVStoreTimeout,
-		DefaultTPName:        defaultTechProfileName,
-		TPKVPathPrefix:       defaultKVPathPrefix,
-		TPVersion:            defaultVersion,
-		TPFileKVPath:         defaultTechProfileKVPath,
-		TPInstanceKVPath:     defaultTPInstanceKVPath,
-		DefaultTechProfileID: DEFAULT_TECH_PROFILE_TABLE_ID,
-		DefaultNumGemPorts:   defaultGemportsCount,
-		DefaultPbits:         []string{defaultPbits},
-		LogLevel:             defaultLogLevel,
+		KVBackend:             nil,
+		KVStoreAddress:        KVStoreAddress,
+		KVStoreType:           KVStoreType,
+		KVStoreTimeout:        defaultKVStoreTimeout,
+		DefaultTPName:         defaultTechProfileName,
+		TPKVPathPrefix:        fmt.Sprintf(defaultKVPathPrefix, basePathKvStore),
+		defaultTpKvPathPrefix: defaultTpKvPathPrefix,
+		TPVersion:             defaultVersion,
+		TPFileKVPath:          defaultTechProfileKVPath,
+		TPInstanceKVPath:      defaultTPInstanceKVPath,
+		DefaultTechProfileID:  DEFAULT_TECH_PROFILE_TABLE_ID,
+		DefaultNumGemPorts:    defaultGemportsCount,
+		DefaultPbits:          []string{defaultPbits},
+		LogLevel:              defaultLogLevel,
 	}
 
 	return &techProfileFlags
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile.go
similarity index 76%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile.go
index ff37326..e21de45 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile.go
@@ -26,11 +26,11 @@
 	"sync"
 	"time"
 
-	"github.com/opencord/voltha-lib-go/v3/pkg/db"
+	"github.com/opencord/voltha-lib-go/v4/pkg/db"
 
-	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
-	"github.com/opencord/voltha-lib-go/v3/pkg/log"
-	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
+	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
+	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
 )
 
 // Interface to pon resource manager APIs
@@ -191,7 +191,8 @@
 	McastGemID       uint32        `json:"multicast_gem_id"`
 }
 
-type iScheduler struct {
+// Instance of Scheduler
+type IScheduler struct {
 	AllocID      uint32 `json:"alloc_id"`
 	Direction    string `json:"direction"`
 	AdditionalBw string `json:"additional_bw"`
@@ -199,7 +200,9 @@
 	Weight       uint32 `json:"weight"`
 	QSchedPolicy string `json:"q_sched_policy"`
 }
-type iGemPortAttribute struct {
+
+// Instance of GemPortAttribute
+type IGemPortAttribute struct {
 	GemportID        uint32        `json:"gemport_id"`
 	MaxQueueSize     string        `json:"max_q_size"`
 	PbitMap          string        `json:"pbit_map"`
@@ -239,10 +242,10 @@
 	Version                        int                 `json:"version"`
 	NumGemPorts                    uint32              `json:"num_gem_ports"`
 	InstanceCtrl                   InstanceControl     `json:"instance_control"`
-	UsScheduler                    iScheduler          `json:"us_scheduler"`
-	DsScheduler                    iScheduler          `json:"ds_scheduler"`
-	UpstreamGemPortAttributeList   []iGemPortAttribute `json:"upstream_gem_port_attribute_list"`
-	DownstreamGemPortAttributeList []iGemPortAttribute `json:"downstream_gem_port_attribute_list"`
+	UsScheduler                    IScheduler          `json:"us_scheduler"`
+	DsScheduler                    IScheduler          `json:"ds_scheduler"`
+	UpstreamGemPortAttributeList   []IGemPortAttribute `json:"upstream_gem_port_attribute_list"`
+	DownstreamGemPortAttributeList []IGemPortAttribute `json:"downstream_gem_port_attribute_list"`
 }
 
 // QThresholds struct for EPON
@@ -379,13 +382,13 @@
 	epon   = "EPON"
 )
 
-func (t *TechProfileMgr) SetKVClient() *db.Backend {
-	kvClient, err := newKVClient(t.config.KVStoreType, t.config.KVStoreAddress, t.config.KVStoreTimeout)
+func (t *TechProfileMgr) SetKVClient(ctx context.Context, pathPrefix string) *db.Backend {
+	kvClient, err := newKVClient(ctx, t.config.KVStoreType, t.config.KVStoreAddress, t.config.KVStoreTimeout)
 	if err != nil {
-		logger.Errorw("failed-to-create-kv-client",
+		logger.Errorw(ctx, "failed-to-create-kv-client",
 			log.Fields{
 				"type": t.config.KVStoreType, "address": t.config.KVStoreAddress,
-				"timeout": t.config.KVStoreTimeout, "prefix": t.config.TPKVPathPrefix,
+				"timeout": t.config.KVStoreTimeout, "prefix": pathPrefix,
 				"error": err.Error(),
 			})
 		return nil
@@ -395,7 +398,7 @@
 		StoreType:  t.config.KVStoreType,
 		Address:    t.config.KVStoreAddress,
 		Timeout:    t.config.KVStoreTimeout,
-		PathPrefix: t.config.TPKVPathPrefix}
+		PathPrefix: pathPrefix}
 
 	/* TODO : Make sure direct call to NewBackend is working fine with backend , currently there is some
 		  issue between kv store and backend , core is not calling NewBackend directly
@@ -404,34 +407,35 @@
 	*/
 }
 
-func newKVClient(storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
+func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
 
-	logger.Infow("kv-store", log.Fields{"storeType": storeType, "address": address})
+	logger.Infow(ctx, "kv-store", log.Fields{"storeType": storeType, "address": address})
 	switch storeType {
 	case "consul":
-		return kvstore.NewConsulClient(address, timeout)
+		return kvstore.NewConsulClient(ctx, address, timeout)
 	case "etcd":
-		return kvstore.NewEtcdClient(address, timeout, log.WarnLevel)
+		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
 	}
 	return nil, errors.New("unsupported-kv-store")
 }
 
-func NewTechProfile(resourceMgr iPonResourceMgr, KVStoreType string, KVStoreAddress string) (*TechProfileMgr, error) {
+func NewTechProfile(ctx context.Context, resourceMgr iPonResourceMgr, KVStoreType string, KVStoreAddress string, basePathKvStore string) (*TechProfileMgr, error) {
 	var techprofileObj TechProfileMgr
-	logger.Debug("Initializing techprofile Manager")
-	techprofileObj.config = NewTechProfileFlags(KVStoreType, KVStoreAddress)
-	techprofileObj.config.KVBackend = techprofileObj.SetKVClient()
+	logger.Debug(ctx, "Initializing techprofile Manager")
+	techprofileObj.config = NewTechProfileFlags(KVStoreType, KVStoreAddress, basePathKvStore)
+	techprofileObj.config.KVBackend = techprofileObj.SetKVClient(ctx, techprofileObj.config.TPKVPathPrefix)
+	techprofileObj.config.DefaultTpKVBackend = techprofileObj.SetKVClient(ctx, techprofileObj.config.defaultTpKvPathPrefix)
 	if techprofileObj.config.KVBackend == nil {
-		logger.Error("Failed to initialize KV backend\n")
+		logger.Error(ctx, "Failed to initialize KV backend\n")
 		return nil, errors.New("KV backend init failed")
 	}
 	techprofileObj.resourceMgr = resourceMgr
-	logger.Debug("Initializing techprofile object instance success")
+	logger.Debug(ctx, "Initializing techprofile object instance success")
 	return &techprofileObj, nil
 }
 
-func (t *TechProfileMgr) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string {
-	logger.Debugw("get-tp-instance-kv-path", log.Fields{
+func (t *TechProfileMgr) GetTechProfileInstanceKVPath(ctx context.Context, techProfiletblID uint32, uniPortName string) string {
+	logger.Debugw(ctx, "get-tp-instance-kv-path", log.Fields{
 		"uniPortName": uniPortName,
 		"tpId":        techProfiletblID,
 	})
@@ -454,18 +458,18 @@
 	case epon:
 		resPtr = &KvEponIns
 	default:
-		log.Errorw("unknown-tech", log.Fields{"tech": pathSlice[0]})
+		logger.Errorw(ctx, "unknown-tech", log.Fields{"tech": pathSlice[0]})
 		return nil, fmt.Errorf("unknown-tech-%s", pathSlice[0])
 	}
 
 	kvResult, _ = t.config.KVBackend.Get(ctx, path)
 	if kvResult == nil {
-		log.Infow("tp-instance-not-found-on-kv", log.Fields{"key": path})
+		logger.Infow(ctx, "tp-instance-not-found-on-kv", log.Fields{"key": path})
 		return nil, nil
 	} else {
 		if value, err := kvstore.ToByte(kvResult.Value); err == nil {
 			if err = json.Unmarshal(value, resPtr); err != nil {
-				log.Errorw("error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
+				logger.Errorw(ctx, "error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
 				return nil, errors.New("error-unmarshal-kv-result")
 			} else {
 				return resPtr, nil
@@ -476,29 +480,29 @@
 }
 
 func (t *TechProfileMgr) addTechProfInstanceToKVStore(ctx context.Context, techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
-	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
-	logger.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
+	path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
+	logger.Debugw(ctx, "Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
 	tpInstanceJson, err := json.Marshal(*tpInstance)
 	if err == nil {
 		// Backend will convert JSON byte array into string format
-		logger.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
+		logger.Debugw(ctx, "Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
 		err = t.config.KVBackend.Put(ctx, path, tpInstanceJson)
 	} else {
-		logger.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
+		logger.Errorw(ctx, "Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
 	}
 	return err
 }
 
 func (t *TechProfileMgr) addEponProfInstanceToKVStore(ctx context.Context, techProfiletblID uint32, uniPortName string, tpInstance *EponProfile) error {
-	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
-	logger.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
+	path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
+	logger.Debugw(ctx, "Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
 	tpInstanceJson, err := json.Marshal(*tpInstance)
 	if err == nil {
 		// Backend will convert JSON byte array into string format
-		logger.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
+		logger.Debugw(ctx, "Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
 		err = t.config.KVBackend.Put(ctx, path, tpInstanceJson)
 	} else {
-		logger.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
+		logger.Errorw(ctx, "Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
 	}
 	return err
 }
@@ -506,21 +510,21 @@
 func (t *TechProfileMgr) getTPFromKVStore(ctx context.Context, techProfiletblID uint32) *DefaultTechProfile {
 	var kvtechprofile DefaultTechProfile
 	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
-	logger.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
-	kvresult, err := t.config.KVBackend.Get(ctx, key)
+	logger.Debugw(ctx, "Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
+	kvresult, err := t.config.DefaultTpKVBackend.Get(ctx, key)
 	if err != nil {
-		logger.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
+		logger.Errorw(ctx, "Error while fetching value from KV store", log.Fields{"key": key})
 		return nil
 	}
 	if kvresult != nil {
 		/* Backend will return Value in string format,needs to be converted to []byte before unmarshal*/
 		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
 			if err = json.Unmarshal(value, &kvtechprofile); err != nil {
-				logger.Errorw("Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
+				logger.Errorw(ctx, "Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
 				return nil
 			}
 
-			logger.Debugw("Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
+			logger.Debugw(ctx, "Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
 			return &kvtechprofile
 		}
 	}
@@ -530,21 +534,21 @@
 func (t *TechProfileMgr) getEponTPFromKVStore(ctx context.Context, techProfiletblID uint32) *DefaultEponProfile {
 	var kvtechprofile DefaultEponProfile
 	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
-	logger.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
-	kvresult, err := t.config.KVBackend.Get(ctx, key)
+	logger.Debugw(ctx, "Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
+	kvresult, err := t.config.DefaultTpKVBackend.Get(ctx, key)
 	if err != nil {
-		logger.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
+		logger.Errorw(ctx, "Error while fetching value from KV store", log.Fields{"key": key})
 		return nil
 	}
 	if kvresult != nil {
 		/* Backend will return Value in string format,needs to be converted to []byte before unmarshal*/
 		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
 			if err = json.Unmarshal(value, &kvtechprofile); err != nil {
-				logger.Errorw("Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
+				logger.Errorw(ctx, "Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
 				return nil
 			}
 
-			logger.Debugw("Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
+			logger.Debugw(ctx, "Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
 			return &kvtechprofile
 		}
 	}
@@ -555,14 +559,14 @@
 	var tpInstance *TechProfile
 	var tpEponInstance *EponProfile
 
-	logger.Infow("creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
+	logger.Infow(ctx, "creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
 
 	// Make sure the uniPortName is as per format pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
 	if !uniPortNameFormat.Match([]byte(uniPortName)) {
-		logger.Errorw("uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
+		logger.Errorw(ctx, "uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
 		return nil, errors.New("uni-port-name-not-confirming-to-format")
 	}
-	tpInstancePath := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	tpInstancePath := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
 	// For example:
 	// tpInstPath like "XGS-PON/64/uni_port_name"
 	// is broken into ["XGS-PON" "64" ...]
@@ -570,74 +574,74 @@
 	if pathSlice[0] == epon {
 		tp := t.getEponTPFromKVStore(ctx, techProfiletblID)
 		if tp != nil {
-			if err := t.validateInstanceControlAttr(tp.InstanceCtrl); err != nil {
-				logger.Error("invalid-instance-ctrl-attr--using-default-tp")
-				tp = t.getDefaultEponProfile()
+			if err := t.validateInstanceControlAttr(ctx, tp.InstanceCtrl); err != nil {
+				logger.Error(ctx, "invalid-instance-ctrl-attr--using-default-tp")
+				tp = t.getDefaultEponProfile(ctx)
 			} else {
-				logger.Infow("using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
+				logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
 			}
 		} else {
-			logger.Info("tp-not-found-on-kv--creating-default-tp")
-			tp = t.getDefaultEponProfile()
+			logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
+			tp = t.getDefaultEponProfile(ctx)
 		}
 
 		if tpEponInstance = t.allocateEponTPInstance(ctx, uniPortName, tp, intfId, tpInstancePath); tpEponInstance == nil {
-			logger.Error("tp-intance-allocation-failed")
+			logger.Error(ctx, "tp-intance-allocation-failed")
 			return nil, errors.New("tp-intance-allocation-failed")
 		}
 		if err := t.addEponProfInstanceToKVStore(ctx, techProfiletblID, uniPortName, tpEponInstance); err != nil {
-			logger.Errorw("error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
+			logger.Errorw(ctx, "error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
 			return nil, errors.New("error-adding-tp-to-kv-store")
 		}
-		logger.Infow("tp-added-to-kv-store-successfully",
+		logger.Infow(ctx, "tp-added-to-kv-store-successfully",
 			log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
 		return tpEponInstance, nil
 	} else {
 		tp := t.getTPFromKVStore(ctx, techProfiletblID)
 		if tp != nil {
-			if err := t.validateInstanceControlAttr(tp.InstanceCtrl); err != nil {
-				logger.Error("invalid-instance-ctrl-attr--using-default-tp")
-				tp = t.getDefaultTechProfile()
+			if err := t.validateInstanceControlAttr(ctx, tp.InstanceCtrl); err != nil {
+				logger.Error(ctx, "invalid-instance-ctrl-attr--using-default-tp")
+				tp = t.getDefaultTechProfile(ctx)
 			} else {
-				logger.Infow("using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
+				logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
 			}
 		} else {
-			logger.Info("tp-not-found-on-kv--creating-default-tp")
-			tp = t.getDefaultTechProfile()
+			logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
+			tp = t.getDefaultTechProfile(ctx)
 		}
 
 		if tpInstance = t.allocateTPInstance(ctx, uniPortName, tp, intfId, tpInstancePath); tpInstance == nil {
-			logger.Error("tp-intance-allocation-failed")
+			logger.Error(ctx, "tp-intance-allocation-failed")
 			return nil, errors.New("tp-intance-allocation-failed")
 		}
 		if err := t.addTechProfInstanceToKVStore(ctx, techProfiletblID, uniPortName, tpInstance); err != nil {
-			logger.Errorw("error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
+			logger.Errorw(ctx, "error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
 			return nil, errors.New("error-adding-tp-to-kv-store")
 		}
-		logger.Infow("tp-added-to-kv-store-successfully",
+		logger.Infow(ctx, "tp-added-to-kv-store-successfully",
 			log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
 		return tpInstance, nil
 	}
 }
 
 func (t *TechProfileMgr) DeleteTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string) error {
-	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
 	return t.config.KVBackend.Delete(ctx, path)
 }
 
-func (t *TechProfileMgr) validateInstanceControlAttr(instCtl InstanceControl) error {
+func (t *TechProfileMgr) validateInstanceControlAttr(ctx context.Context, instCtl InstanceControl) error {
 	if instCtl.Onu != "single-instance" && instCtl.Onu != "multi-instance" {
-		logger.Errorw("invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
+		logger.Errorw(ctx, "invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
 		return errors.New("invalid-onu-instance-ctl-attr")
 	}
 
 	if instCtl.Uni != "single-instance" && instCtl.Uni != "multi-instance" {
-		logger.Errorw("invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
+		logger.Errorw(ctx, "invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
 		return errors.New("invalid-uni-instance-ctl-attr")
 	}
 
 	if instCtl.Uni == "multi-instance" {
-		logger.Error("uni-multi-instance-tp-not-supported")
+		logger.Error(ctx, "uni-multi-instance-tp-not-supported")
 		return errors.New("uni-multi-instance-tp-not-supported")
 	}
 
@@ -646,27 +650,27 @@
 
 func (t *TechProfileMgr) allocateTPInstance(ctx context.Context, uniPortName string, tp *DefaultTechProfile, intfId uint32, tpInstPath string) *TechProfile {
 
-	var usGemPortAttributeList []iGemPortAttribute
-	var dsGemPortAttributeList []iGemPortAttribute
-	var dsMulticastGemAttributeList []iGemPortAttribute
-	var dsUnicastGemAttributeList []iGemPortAttribute
+	var usGemPortAttributeList []IGemPortAttribute
+	var dsGemPortAttributeList []IGemPortAttribute
+	var dsMulticastGemAttributeList []IGemPortAttribute
+	var dsUnicastGemAttributeList []IGemPortAttribute
 	var tcontIDs []uint32
 	var gemPorts []uint32
 	var err error
 
-	logger.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
+	logger.Infow(ctx, "Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
 
 	if tp.InstanceCtrl.Onu == "multi-instance" {
 		t.AllocIDMgmtLock.Lock()
 		tcontIDs, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1)
 		t.AllocIDMgmtLock.Unlock()
 		if err != nil {
-			logger.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 			return nil
 		}
 	} else { // "single-instance"
 		if tpInst, err := t.getSingleInstanceTp(ctx, tpInstPath); err != nil {
-			logger.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 			return nil
 		} else if tpInst == nil {
 			// No "single-instance" tp found on one any uni port for the given TP ID
@@ -675,7 +679,7 @@
 			tcontIDs, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1)
 			t.AllocIDMgmtLock.Unlock()
 			if err != nil {
-				logger.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+				logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 				return nil
 			}
 		} else {
@@ -683,18 +687,18 @@
 			tcontIDs = append(tcontIDs, tpInst.UsScheduler.AllocID)
 		}
 	}
-	logger.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
+	logger.Debugw(ctx, "Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
 	t.GemPortIDMgmtLock.Lock()
 	gemPorts, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts)
 	t.GemPortIDMgmtLock.Unlock()
 	if err != nil {
-		logger.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
+		logger.Errorw(ctx, "Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
 		return nil
 	}
-	logger.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+	logger.Infow(ctx, "Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
 	for index := 0; index < int(tp.NumGemPorts); index++ {
 		usGemPortAttributeList = append(usGemPortAttributeList,
-			iGemPortAttribute{GemportID: gemPorts[index],
+			IGemPortAttribute{GemportID: gemPorts[index],
 				MaxQueueSize:     tp.UpstreamGemPortAttributeList[index].MaxQueueSize,
 				PbitMap:          tp.UpstreamGemPortAttributeList[index].PbitMap,
 				AesEncryption:    tp.UpstreamGemPortAttributeList[index].AesEncryption,
@@ -705,12 +709,12 @@
 				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
 	}
 
-	logger.Info("length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
+	logger.Info(ctx, "length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
 	//put multicast and unicast downstream GEM port attributes in different lists first
 	for index := 0; index < int(len(tp.DownstreamGemPortAttributeList)); index++ {
 		if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
 			dsMulticastGemAttributeList = append(dsMulticastGemAttributeList,
-				iGemPortAttribute{
+				IGemPortAttribute{
 					McastGemID:       tp.DownstreamGemPortAttributeList[index].McastGemID,
 					MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
 					PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
@@ -725,7 +729,7 @@
 					SControlList:     tp.DownstreamGemPortAttributeList[index].SControlList})
 		} else {
 			dsUnicastGemAttributeList = append(dsUnicastGemAttributeList,
-				iGemPortAttribute{
+				IGemPortAttribute{
 					MaxQueueSize:     tp.DownstreamGemPortAttributeList[index].MaxQueueSize,
 					PbitMap:          tp.DownstreamGemPortAttributeList[index].PbitMap,
 					AesEncryption:    tp.DownstreamGemPortAttributeList[index].AesEncryption,
@@ -739,7 +743,7 @@
 	//add unicast downstream GEM ports to dsGemPortAttributeList
 	for index := 0; index < int(tp.NumGemPorts); index++ {
 		dsGemPortAttributeList = append(dsGemPortAttributeList,
-			iGemPortAttribute{GemportID: gemPorts[index],
+			IGemPortAttribute{GemportID: gemPorts[index],
 				MaxQueueSize:     dsUnicastGemAttributeList[index].MaxQueueSize,
 				PbitMap:          dsUnicastGemAttributeList[index].PbitMap,
 				AesEncryption:    dsUnicastGemAttributeList[index].AesEncryption,
@@ -761,14 +765,14 @@
 		Version:              tp.Version,
 		NumGemPorts:          tp.NumGemPorts,
 		InstanceCtrl:         tp.InstanceCtrl,
-		UsScheduler: iScheduler{
+		UsScheduler: IScheduler{
 			AllocID:      tcontIDs[0],
 			Direction:    tp.UsScheduler.Direction,
 			AdditionalBw: tp.UsScheduler.AdditionalBw,
 			Priority:     tp.UsScheduler.Priority,
 			Weight:       tp.UsScheduler.Weight,
 			QSchedPolicy: tp.UsScheduler.QSchedPolicy},
-		DsScheduler: iScheduler{
+		DsScheduler: IScheduler{
 			AllocID:      tcontIDs[0],
 			Direction:    tp.DsScheduler.Direction,
 			AdditionalBw: tp.DsScheduler.AdditionalBw,
@@ -788,22 +792,22 @@
 	var gemPorts []uint32
 	var err error
 
-	log.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
+	logger.Infow(ctx, "Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
 
 	if tp.InstanceCtrl.Onu == "multi-instance" {
 		if tcontIDs, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
-			log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 			return nil
 		}
 	} else { // "single-instance"
 		if tpInst, err := t.getSingleInstanceEponTp(ctx, tpInstPath); err != nil {
-			log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 			return nil
 		} else if tpInst == nil {
 			// No "single-instance" tp found on one any uni port for the given TP ID
 			// Allocate a new TcontID or AllocID
 			if tcontIDs, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
-				log.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+				logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 				return nil
 			}
 		} else {
@@ -811,12 +815,12 @@
 			tcontIDs = append(tcontIDs, tpInst.AllocID)
 		}
 	}
-	log.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
+	logger.Debugw(ctx, "Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
 	if gemPorts, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
-		log.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
+		logger.Errorw(ctx, "Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
 		return nil
 	}
-	log.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+	logger.Infow(ctx, "Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
 	for index := 0; index < int(tp.NumGemPorts); index++ {
 		usQueueAttributeList = append(usQueueAttributeList,
 			iUpstreamQueueAttribute{GemportID: gemPorts[index],
@@ -837,7 +841,7 @@
 				DiscardConfig:             tp.UpstreamQueueAttributeList[index].DiscardConfig})
 	}
 
-	log.Info("length of DownstreamGemPortAttributeList", len(tp.DownstreamQueueAttributeList))
+	logger.Info(ctx, "length of DownstreamGemPortAttributeList", len(tp.DownstreamQueueAttributeList))
 	for index := 0; index < int(tp.NumGemPorts); index++ {
 		dsQueueAttributeList = append(dsQueueAttributeList,
 			iDownstreamQueueAttribute{GemportID: gemPorts[index],
@@ -879,10 +883,10 @@
 	for keyPath, kvPair := range kvPairs {
 		if value, err := kvstore.ToByte(kvPair.Value); err == nil {
 			if err = json.Unmarshal(value, &tpInst); err != nil {
-				logger.Errorw("error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
+				logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
 				return nil, errors.New("error-unmarshal-kv-pair")
 			} else {
-				logger.Debugw("found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
+				logger.Debugw(ctx, "found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
 				return &tpInst, nil
 			}
 		}
@@ -903,10 +907,10 @@
 	for keyPath, kvPair := range kvPairs {
 		if value, err := kvstore.ToByte(kvPair.Value); err == nil {
 			if err = json.Unmarshal(value, &tpInst); err != nil {
-				logger.Errorw("error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
+				logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
 				return nil, errors.New("error-unmarshal-kv-pair")
 			} else {
-				logger.Debugw("found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
+				logger.Debugw(ctx, "found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
 				return &tpInst, nil
 			}
 		}
@@ -914,13 +918,12 @@
 	return nil, nil
 }
 
-func (t *TechProfileMgr) getDefaultTechProfile() *DefaultTechProfile {
-
+func (t *TechProfileMgr) getDefaultTechProfile(ctx context.Context) *DefaultTechProfile {
 	var usGemPortAttributeList []GemPortAttribute
 	var dsGemPortAttributeList []GemPortAttribute
 
 	for _, pbit := range t.config.DefaultPbits {
-		logger.Debugw("Creating GEM port", log.Fields{"pbit": pbit})
+		logger.Debugw(ctx, "Creating GEM port", log.Fields{"pbit": pbit})
 		usGemPortAttributeList = append(usGemPortAttributeList,
 			GemPortAttribute{
 				MaxQueueSize:     defaultMaxQueueSize,
@@ -978,13 +981,13 @@
 }
 
 // getDefaultTechProfile function for EPON
-func (t *TechProfileMgr) getDefaultEponProfile() *DefaultEponProfile {
+func (t *TechProfileMgr) getDefaultEponProfile(ctx context.Context) *DefaultEponProfile {
 
 	var usQueueAttributeList []UpstreamQueueAttribute
 	var dsQueueAttributeList []DownstreamQueueAttribute
 
 	for _, pbit := range t.config.DefaultPbits {
-		log.Debugw("Creating Queue", log.Fields{"pbit": pbit})
+		logger.Debugw(ctx, "Creating Queue", log.Fields{"pbit": pbit})
 		usQueueAttributeList = append(usQueueAttributeList,
 			UpstreamQueueAttribute{
 				MaxQueueSize:              defaultMaxQueueSize,
@@ -1041,7 +1044,7 @@
 		DownstreamQueueAttributeList: dsQueueAttributeList}
 }
 
-func (t *TechProfileMgr) GetprotoBufParamValue(paramType string, paramKey string) int32 {
+func (t *TechProfileMgr) GetprotoBufParamValue(ctx context.Context, paramType string, paramKey string) int32 {
 	var result int32 = -1
 
 	if paramType == "direction" {
@@ -1059,7 +1062,7 @@
 	} else if paramType == "sched_policy" {
 		for key, val := range tp_pb.SchedulingPolicy_value {
 			if key == paramKey {
-				logger.Debugw("Got value in proto", log.Fields{"key": key, "value": val})
+				logger.Debugw(ctx, "Got value in proto", log.Fields{"key": key, "value": val})
 				result = val
 			}
 		}
@@ -1070,29 +1073,29 @@
 			}
 		}
 	} else {
-		logger.Error("Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
+		logger.Error(ctx, "Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
 		return -1
 	}
-	logger.Debugw("Got value in proto", log.Fields{"key": paramKey, "value": result})
+	logger.Debugw(ctx, "Got value in proto", log.Fields{"key": paramKey, "value": result})
 	return result
 }
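
GetprotoBufParamValue resolves a tech-profile string such as "UPSTREAM" or "StrictPriority" against the protoc-generated name-to-value maps (tp_pb.Direction_value, tp_pb.SchedulingPolicy_value, and so on), returning -1 for an unknown name. A hedged usage sketch; ctx and tpMgr are placeholders for a real request context and an initialized *TechProfileMgr:

    dir := tpMgr.GetprotoBufParamValue(ctx, "direction", "UPSTREAM")
    if dir == -1 {
    	// Unknown name: the method has already logged an error.
    }
    // Equivalent lookup against the generated map:
    // tp_pb.Direction_value["UPSTREAM"] == int32(tp_pb.Direction_UPSTREAM)
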
 
-func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
-	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction))
+func (t *TechProfileMgr) GetUsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
+	dir := tp_pb.Direction(t.GetprotoBufParamValue(ctx, "direction", tpInstance.UsScheduler.Direction))
 	if dir == -1 {
-		logger.Errorf("Error in getting proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
+		logger.Errorf(ctx, "Error in getting proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
 		return nil, fmt.Errorf("unable to get proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
 	}
 
-	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw))
+	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue(ctx, "additional_bw", tpInstance.UsScheduler.AdditionalBw))
 	if bw == -1 {
-		logger.Errorf("Error in getting proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
+		logger.Errorf(ctx, "Error in getting proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
 		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
 	}
 
-	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy))
+	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue(ctx, "sched_policy", tpInstance.UsScheduler.QSchedPolicy))
 	if policy == -1 {
-		logger.Errorf("Error in getting proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
+		logger.Errorf(ctx, "Error in getting proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
 		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
 	}
 
@@ -1104,23 +1107,23 @@
 		SchedPolicy:  policy}, nil
 }
 
-func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
+func (t *TechProfileMgr) GetDsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
 
-	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction))
+	dir := tp_pb.Direction(t.GetprotoBufParamValue(ctx, "direction", tpInstance.DsScheduler.Direction))
 	if dir == -1 {
-		logger.Errorf("Error in getting proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
+		logger.Errorf(ctx, "Error in getting proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
 		return nil, fmt.Errorf("unable to get proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
 	}
 
-	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw))
+	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue(ctx, "additional_bw", tpInstance.DsScheduler.AdditionalBw))
 	if bw == -1 {
-		logger.Errorf("Error in getting proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
+		logger.Errorf(ctx, "Error in getting proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
 		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
 	}
 
-	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy))
+	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue(ctx, "sched_policy", tpInstance.DsScheduler.QSchedPolicy))
 	if policy == -1 {
-		logger.Errorf("Error in getting proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
+		logger.Errorf(ctx, "Error in getting proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
 		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
 	}
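
With the v4 signatures, both scheduler getters take the context first and return an error as soon as any of the three lookups (direction, additional bandwidth, scheduling policy) resolves to -1. A call-site sketch, assuming tpMgr, tpInstance and ctx are already in scope:

    usSched, err := tpMgr.GetUsScheduler(ctx, tpInstance)
    if err != nil {
    	return err // an unrecognized direction/bandwidth/policy string
    }
    dsSched, err := tpMgr.GetDsScheduler(ctx, tpInstance)
    if err != nil {
    	return err
    }
    // usSched and dsSched then feed GetTrafficScheduler.
    _, _ = usSched, dsSched
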
 
@@ -1144,7 +1147,7 @@
 	return tSched
 }
 
-func (tpm *TechProfileMgr) GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
+func (tpm *TechProfileMgr) GetTrafficQueues(ctx context.Context, tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
 
 	var encryp bool
 	if Dir == tp_pb.Direction_UPSTREAM {
@@ -1158,20 +1161,20 @@
 				encryp = false
 			}
 
-			schedPolicy := tpm.GetprotoBufParamValue("sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
+			schedPolicy := tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
 			if schedPolicy == -1 {
-				logger.Errorf("Error in getting Proto Id for scheduling policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
+				logger.Errorf(ctx, "Error in getting Proto Id for scheduling policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
 				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
 			}
 
-			discardPolicy := tpm.GetprotoBufParamValue("discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
+			discardPolicy := tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
 			if discardPolicy == -1 {
-				logger.Errorf("Error in getting Proto Id for discard policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy, Count)
+				logger.Errorf(ctx, "Error in getting Proto Id for discard policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy, Count)
 				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
 			}
 
 			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
-				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.UsScheduler.Direction)),
+				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.UsScheduler.Direction)),
 				GemportId:     tp.UpstreamGemPortAttributeList[Count].GemportID,
 				PbitMap:       tp.UpstreamGemPortAttributeList[Count].PbitMap,
 				AesEncryption: encryp,
@@ -1181,7 +1184,7 @@
 				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
 			})
 		}
-		logger.Debugw("Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		logger.Debugw(ctx, "Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
 		return GemPorts, nil
 	} else if Dir == tp_pb.Direction_DOWNSTREAM {
 		//downstream GEM ports
@@ -1198,20 +1201,20 @@
 				encryp = false
 			}
 
-			schedPolicy := tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
+			schedPolicy := tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
 			if schedPolicy == -1 {
-				logger.Errorf("Error in getting Proto Id for scheduling policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
+				logger.Errorf(ctx, "Error in getting Proto Id for scheduling policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
 				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
 			}
 
-			discardPolicy := tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
+			discardPolicy := tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
 			if discardPolicy == -1 {
-				logger.Errorf("Error in getting Proto Id for discard policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy, Count)
+				logger.Errorf(ctx, "Error in getting Proto Id for discard policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy, Count)
 				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
 			}
 
 			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
-				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
+				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.DsScheduler.Direction)),
 				GemportId:     tp.DownstreamGemPortAttributeList[Count].GemportID,
 				PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
 				AesEncryption: encryp,
@@ -1221,11 +1224,11 @@
 				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
 			})
 		}
-		logger.Debugw("Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		logger.Debugw(ctx, "Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
 		return GemPorts, nil
 	}
 
-	logger.Errorf("Unsupported direction %s used for generating Traffic Queue list", Dir)
+	logger.Errorf(ctx, "Unsupported direction %s used for generating Traffic Queue list", Dir)
 	return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unsupported direction %s", Dir)
 }
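
GetTrafficQueues walks the GEM port attribute list for the requested direction, resolving each entry's scheduling and discard policy through GetprotoBufParamValue and failing if either comes back -1. A hedged caller sketch (tpMgr, tpInstance and ctx assumed in scope):

    queues, err := tpMgr.GetTrafficQueues(ctx, tpInstance, tp_pb.Direction_UPSTREAM)
    if err != nil {
    	return err
    }
    for _, q := range queues {
    	// Each queue pairs a GEM port with its resolved policy fields.
    	logger.Debugw(ctx, "traffic-queue", log.Fields{"gemport": q.GemportId, "priority": q.Priority})
    }
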
 
@@ -1235,7 +1238,7 @@
 		(isMulticastAttrValue == "True" || isMulticastAttrValue == "true" || isMulticastAttrValue == "TRUE")
 }
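
The visible part of the check accepts exactly "True", "true" or "TRUE". A slightly broader, case-insensitive formulation is sketched below as an alternative only, not as the patch's code; the empty string still reports false:

    // strings.EqualFold also matches mixed case such as "tRue".
    func isMulticastGem(isMulticastAttrValue string) bool {
    	return strings.EqualFold(isMulticastAttrValue, "true")
    }
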
 
-func (tpm *TechProfileMgr) GetMulticastTrafficQueues(tp *TechProfile) []*tp_pb.TrafficQueue {
+func (tpm *TechProfileMgr) GetMulticastTrafficQueues(ctx context.Context, tp *TechProfile) []*tp_pb.TrafficQueue {
 	var encryp bool
 	NumGemPorts := len(tp.DownstreamGemPortAttributeList)
 	mcastTrafficQueues := make([]*tp_pb.TrafficQueue, 0)
@@ -1249,29 +1252,29 @@
 			encryp = false
 		}
 		mcastTrafficQueues = append(mcastTrafficQueues, &tp_pb.TrafficQueue{
-			Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
+			Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.DsScheduler.Direction)),
 			GemportId:     tp.DownstreamGemPortAttributeList[Count].McastGemID,
 			PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
 			AesEncryption: encryp,
-			SchedPolicy:   tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
+			SchedPolicy:   tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
 			Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
 			Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
-			DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
+			DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
 		})
 	}
-	logger.Debugw("Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
+	logger.Debugw(ctx, "Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
 	return mcastTrafficQueues
 }
 
-func (tpm *TechProfileMgr) GetUsTrafficScheduler(tp *TechProfile) *tp_pb.TrafficScheduler {
-	UsScheduler, _ := tpm.GetUsScheduler(tp)
+func (tpm *TechProfileMgr) GetUsTrafficScheduler(ctx context.Context, tp *TechProfile) *tp_pb.TrafficScheduler {
+	UsScheduler, _ := tpm.GetUsScheduler(ctx, tp)
 
 	return &tp_pb.TrafficScheduler{Direction: UsScheduler.Direction,
 		AllocId:   tp.UsScheduler.AllocID,
 		Scheduler: UsScheduler}
 }
 
-func (t *TechProfileMgr) GetGemportIDForPbit(tp interface{}, dir tp_pb.Direction, pbit uint32) uint32 {
+func (t *TechProfileMgr) GetGemportIDForPbit(ctx context.Context, tp interface{}, dir tp_pb.Direction, pbit uint32) uint32 {
 	/*
 	  Function to get the Gemport ID mapped to a pbit.
 	*/
@@ -1287,7 +1290,7 @@
 					// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
 					if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
 						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-							logger.Debugw("Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[gemCnt].GemportID})
+							logger.Debugw(ctx, "Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[gemCnt].GemportID})
 							return tp.UpstreamGemPortAttributeList[gemCnt].GemportID
 						}
 					}
@@ -1303,14 +1306,14 @@
 					// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
 					if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
 						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-							logger.Debugw("Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[gemCnt].GemportID})
+							logger.Debugw(ctx, "Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[gemCnt].GemportID})
 							return tp.DownstreamGemPortAttributeList[gemCnt].GemportID
 						}
 					}
 				}
 			}
 		}
-		logger.Errorw("No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+		logger.Errorw(ctx, "No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
 	case *EponProfile:
 		if dir == tp_pb.Direction_UPSTREAM {
 			// upstream GEM ports
@@ -1322,7 +1325,7 @@
 					// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
 					if p, err := strconv.Atoi(string(tp.UpstreamQueueAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
 						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-							logger.Debugw("Found-US-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.UpstreamQueueAttributeList[gemCnt].GemportID})
+							logger.Debugw(ctx, "Found-US-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.UpstreamQueueAttributeList[gemCnt].GemportID})
 							return tp.UpstreamQueueAttributeList[gemCnt].GemportID
 						}
 					}
@@ -1338,16 +1341,16 @@
 					// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
 					if p, err := strconv.Atoi(string(tp.DownstreamQueueAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
 						if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-							logger.Debugw("Found-DS-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.DownstreamQueueAttributeList[gemCnt].GemportID})
+							logger.Debugw(ctx, "Found-DS-Queue-for-Pcp", log.Fields{"pbit": pbit, "Queue": tp.DownstreamQueueAttributeList[gemCnt].GemportID})
 							return tp.DownstreamQueueAttributeList[gemCnt].GemportID
 						}
 					}
 				}
 			}
 		}
-		logger.Errorw("No-QueueId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+		logger.Errorw(ctx, "No-QueueId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
 	default:
-		logger.Errorw("unknown-tech", log.Fields{"tp": tp})
+		logger.Errorw(ctx, "unknown-tech", log.Fields{"tp": tp})
 	}
 	return 0
 }
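
The PbitMap fields above are strings of the form "0b" followed by eight bits, most-significant p-bit first; the index lenOfPbitMap-pbitMapIdx+1 therefore reads the map from the least-significant end while pbitMapIdx-2 names the p-bit under test. A standalone sketch of that arithmetic; the helper pbitMatches is illustrative only:

    // For pbitMap "0b00000101", p-bits 0 and 2 are set.
    func pbitMatches(pbitMap string, pbit uint32) bool {
    	n := len(pbitMap) // 10 for "0b" plus eight bits
    	for idx := 2; idx < n; idx++ {
    		// n-idx+1 walks from the LSB; idx-2 is the p-bit number.
    		if p, err := strconv.Atoi(string(pbitMap[n-idx+1])); err == nil {
    			if uint32(idx-2) == pbit && p == 1 {
    				return true
    			}
    		}
    	}
    	return false
    }
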
@@ -1368,14 +1371,14 @@
 			if value, err := kvstore.ToByte(kvPair.Value); err == nil {
 				if tech == xgspon || tech == gpon {
 					if err = json.Unmarshal(value, &tpTech); err != nil {
-						logger.Errorw("error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
+						logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
 						continue
 					} else {
 						tpInstancesTech = append(tpInstancesTech, tpTech)
 					}
 				} else if tech == epon {
 					if err = json.Unmarshal(value, &tpEpon); err != nil {
-						logger.Errorw("error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
+						logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
 						continue
 					} else {
 						tpInstancesEpon = append(tpInstancesEpon, tpEpon)
@@ -1390,7 +1393,7 @@
 		case epon:
 			return tpInstancesEpon
 		default:
-			log.Errorw("unknown-technology", log.Fields{"tech": tech})
+			logger.Errorw(ctx, "unknown-technology", log.Fields{"tech": tech})
 			return nil
 		}
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile_if.go
similarity index 60%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile_if.go
index 8391a5b..ee67d2c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/techprofile/tech_profile_if.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/techprofile/tech_profile_if.go
@@ -19,23 +19,23 @@
 import (
 	"context"
 
-	"github.com/opencord/voltha-lib-go/v3/pkg/db"
-	tp_pb "github.com/opencord/voltha-protos/v3/go/tech_profile"
+	"github.com/opencord/voltha-lib-go/v4/pkg/db"
+	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
 )
 
 type TechProfileIf interface {
-	SetKVClient() *db.Backend
-	GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string
+	SetKVClient(ctx context.Context, pathPrefix string) *db.Backend
+	GetTechProfileInstanceKVPath(ctx context.Context, techProfiletblID uint32, uniPortName string) string
 	GetTPInstanceFromKVStore(ctx context.Context, techProfiletblID uint32, path string) (interface{}, error)
 	CreateTechProfInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfId uint32) (interface{}, error)
 	DeleteTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string) error
-	GetprotoBufParamValue(paramType string, paramKey string) int32
-	GetUsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
-	GetDsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
+	GetprotoBufParamValue(ctx context.Context, paramType string, paramKey string) int32
+	GetUsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
+	GetDsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
 	GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
 		ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
-	GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
-	GetMulticastTrafficQueues(tp *TechProfile) []*tp_pb.TrafficQueue
-	GetGemportIDForPbit(tp interface{}, Dir tp_pb.Direction, pbit uint32) uint32
+	GetTrafficQueues(ctx context.Context, tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
+	GetMulticastTrafficQueues(ctx context.Context, tp *TechProfile) []*tp_pb.TrafficQueue
+	GetGemportIDForPbit(ctx context.Context, tp interface{}, Dir tp_pb.Direction, pbit uint32) uint32
 	FindAllTpInstances(ctx context.Context, techProfiletblID uint32, ponIntf uint32, onuID uint32) interface{}
 }
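
Every method of TechProfileIf now carries a context.Context (and SetKVClient additionally gains a pathPrefix), so consumers built against v4 must thread their request context through each call. A hypothetical adapter-side sketch; setupQueues and its arguments are illustrative and not part of the patch:

    import (
    	"context"

    	"github.com/opencord/voltha-lib-go/v4/pkg/techprofile"
    	tp_pb "github.com/opencord/voltha-protos/v4/go/tech_profile"
    )

    func setupQueues(ctx context.Context, tpIf techprofile.TechProfileIf, tp *techprofile.TechProfile) error {
    	queues, err := tpIf.GetTrafficQueues(ctx, tp, tp_pb.Direction_DOWNSTREAM)
    	if err != nil {
    		return err
    	}
    	mcast := tpIf.GetMulticastTrafficQueues(ctx, tp)
    	_, _ = queues, mcast
    	return nil
    }
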
diff --git a/vendor/github.com/opencord/voltha-lib-go/v3/pkg/version/version.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/version/version.go
similarity index 100%
rename from vendor/github.com/opencord/voltha-lib-go/v3/pkg/version/version.go
rename to vendor/github.com/opencord/voltha-lib-go/v4/pkg/version/version.go