Revert "[VOL-3069]Pass Context in methods which are performing logging and need the context"

This reverts commit 3c425fbeabed17ec8dad437678b4d105deaf2fbe.

Reason for revert: higher-priority patches need to be merged first.
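
For reference, a minimal sketch of the calling convention this revert
restores (hypothetical caller code, not part of this change; names such
as emitEvent, kafkaICProxy and the topic strings are made up for
illustration). The EventProxy and CoreProxy APIs go back to taking no
context.Context argument:

    // assumes the voltha-lib-go "common" and "adapterif" packages and
    // voltha-protos are imported as in the files touched below
    func emitEvent(ep adapterif.EventProxy, de *voltha.DeviceEvent,
        cat adapterif.EventCategory, sub adapterif.EventSubCategory, ts int64) error {
        // SendDeviceEvent no longer takes a leading context.Context
        return ep.SendDeviceEvent(de, cat, sub, ts)
    }

    // The constructors drop the context parameter as well:
    //   cp := common.NewCoreProxy(kafkaICProxy, "adapter-topic", "core-topic")
    //   ap := common.NewAdapterProxy(kafkaICProxy, "adapter-topic", "core-topic", backend)

Callers that had been updated by 3c425fb to thread a ctx through these
calls simply drop that first argument again.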

Change-Id: Iaa03a5977357dcd86de358d76e90cc54cd6b1fa5
diff --git a/VERSION b/VERSION
index 3797f3f..5762a6f 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-3.1.17
+3.1.18
diff --git a/pkg/adapters/adapterif/events_proxy_if.go b/pkg/adapters/adapterif/events_proxy_if.go
index dbd8140..c144935 100644
--- a/pkg/adapters/adapterif/events_proxy_if.go
+++ b/pkg/adapters/adapterif/events_proxy_if.go
@@ -17,15 +17,14 @@
 package adapterif
 
 import (
-	"context"
 	"github.com/opencord/voltha-protos/v3/go/voltha"
 )
 
 // EventProxy interface for eventproxy
 type EventProxy interface {
-	SendDeviceEvent(ctx context.Context, deviceEvent *voltha.DeviceEvent, category EventCategory,
+	SendDeviceEvent(deviceEvent *voltha.DeviceEvent, category EventCategory,
 		subCategory EventSubCategory, raisedTs int64) error
-	SendKpiEvent(ctx context.Context, id string, deviceEvent *voltha.KpiEvent2, category EventCategory,
+	SendKpiEvent(id string, deviceEvent *voltha.KpiEvent2, category EventCategory,
 		subCategory EventSubCategory, raisedTs int64) error
 }
 
diff --git a/pkg/adapters/common/adapter_proxy.go b/pkg/adapters/common/adapter_proxy.go
index ca44d0d..cd5750f 100644
--- a/pkg/adapters/common/adapter_proxy.go
+++ b/pkg/adapters/common/adapter_proxy.go
@@ -35,14 +35,14 @@
 	endpointMgr  kafka.EndpointManager
 }
 
-func NewAdapterProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string, backend *db.Backend) *AdapterProxy {
+func NewAdapterProxy(kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string, backend *db.Backend) *AdapterProxy {
 	proxy := AdapterProxy{
 		kafkaICProxy: kafkaProxy,
 		adapterTopic: adapterTopic,
 		coreTopic:    coreTopic,
 		endpointMgr:  kafka.NewEndpointManager(backend),
 	}
-	logger.Debugw(ctx, "topics", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+	logger.Debugw("topics", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
 	return &proxy
 }
 
@@ -54,14 +54,14 @@
 	toDeviceId string,
 	proxyDeviceId string,
 	messageId string) error {
-	logger.Debugw(ctx, "sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
+	logger.Debugw("sending-inter-adapter-message", log.Fields{"type": msgType, "from": fromAdapter,
 		"to": toAdapter, "toDevice": toDeviceId, "proxyDevice": proxyDeviceId})
 
 	//Marshal the message
 	var marshalledMsg *any.Any
 	var err error
 	if marshalledMsg, err = ptypes.MarshalAny(msg); err != nil {
-		logger.Warnw(ctx, "cannot-marshal-msg", log.Fields{"error": err})
+		logger.Warnw("cannot-marshal-msg", log.Fields{"error": err})
 		return err
 	}
 
@@ -90,7 +90,7 @@
 	}
 
 	// Set up the required rpc arguments
-	endpoint, err := ap.endpointMgr.GetEndpoint(ctx, toDeviceId, toAdapter)
+	endpoint, err := ap.endpointMgr.GetEndpoint(toDeviceId, toAdapter)
 	if err != nil {
 		return err
 	}
@@ -99,6 +99,6 @@
 	rpc := "process_inter_adapter_message"
 
 	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, proxyDeviceId, args...)
-	logger.Debugw(ctx, "inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
-	return unPackResponse(ctx, rpc, "", success, result)
+	logger.Debugw("inter-adapter-msg-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(rpc, "", success, result)
 }
diff --git a/pkg/adapters/common/adapter_proxy_test.go b/pkg/adapters/common/adapter_proxy_test.go
index 01a6603..3ba8290 100644
--- a/pkg/adapters/common/adapter_proxy_test.go
+++ b/pkg/adapters/common/adapter_proxy_test.go
@@ -39,11 +39,10 @@
 
 func init() {
 
-	ctx := context.Background()
 	var err error
 	embedEtcdServerPort, err = freeport.GetFreePort()
 	if err != nil {
-		logger.Fatal(ctx, "Cannot get freeport for KvClient")
+		logger.Fatal("Cannot get freeport for KvClient")
 	}
 }
 
@@ -55,8 +54,8 @@
 			Response: &voltha.Device{Id: "testDeviceId"},
 		},
 	}
-	backend := db.NewBackend(context.Background(), "etcd", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
-	adapter := NewAdapterProxy(context.Background(), mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic", backend)
+	backend := db.NewBackend("etcd", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
+	adapter := NewAdapterProxy(mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic", backend)
 
 	assert.NotNil(t, adapter)
 }
@@ -69,9 +68,9 @@
 			Response: &voltha.Device{Id: "testDeviceId"},
 		},
 	}
-	backend := db.NewBackend(context.Background(), "etcd", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
+	backend := db.NewBackend("etcd", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
 
-	adapter := NewAdapterProxy(context.Background(), mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic", backend)
+	adapter := NewAdapterProxy(mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic", backend)
 
 	adapter.endpointMgr = mocks.NewEndpointManager()
 
@@ -113,9 +112,9 @@
 			Response: &voltha.Device{Id: "testDeviceId"},
 		},
 	}
-	backend := db.NewBackend(context.Background(), "etcd", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
+	backend := db.NewBackend("etcd", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
 
-	adapter := NewAdapterProxy(context.Background(), mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic", backend)
+	adapter := NewAdapterProxy(mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic", backend)
 
 	adapter.endpointMgr = mocks.NewEndpointManager()
 
@@ -141,9 +140,9 @@
 			Response: &voltha.Device{Id: "testDeviceId"},
 		},
 	}
-	backend := db.NewBackend(context.Background(), "etcd", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
+	backend := db.NewBackend("etcd", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
 
-	adapter := NewAdapterProxy(context.Background(), mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic", backend)
+	adapter := NewAdapterProxy(mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic", backend)
 
 	adapter.endpointMgr = mocks.NewEndpointManager()
 
diff --git a/pkg/adapters/common/common.go b/pkg/adapters/common/common.go
index ad8b11b..95a036d 100644
--- a/pkg/adapters/common/common.go
+++ b/pkg/adapters/common/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.CLogger
+var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "common"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "common"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/adapters/common/core_proxy.go b/pkg/adapters/common/core_proxy.go
index 28b532f..20e1a52 100644
--- a/pkg/adapters/common/core_proxy.go
+++ b/pkg/adapters/common/core_proxy.go
@@ -37,28 +37,28 @@
 	lockDeviceIdCoreMap sync.RWMutex
 }
 
-func NewCoreProxy(ctx context.Context, kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string) *CoreProxy {
+func NewCoreProxy(kafkaProxy kafka.InterContainerProxy, adapterTopic string, coreTopic string) *CoreProxy {
 	var proxy CoreProxy
 	proxy.kafkaICProxy = kafkaProxy
 	proxy.adapterTopic = adapterTopic
 	proxy.coreTopic = coreTopic
 	proxy.deviceIdCoreMap = make(map[string]string)
 	proxy.lockDeviceIdCoreMap = sync.RWMutex{}
-	logger.Debugw(ctx, "TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
+	logger.Debugw("TOPICS", log.Fields{"core": proxy.coreTopic, "adapter": proxy.adapterTopic})
 
 	return &proxy
 }
 
-func unPackResponse(ctx context.Context, rpc string, deviceId string, success bool, response *a.Any) error {
+func unPackResponse(rpc string, deviceId string, success bool, response *a.Any) error {
 	if success {
 		return nil
 	} else {
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(response, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		logger.Debugw(ctx, "response", log.Fields{"rpc": rpc, "deviceId": deviceId, "success": success, "error": err})
+		logger.Debugw("response", log.Fields{"rpc": rpc, "deviceId": deviceId, "success": success, "error": err})
 		// TODO:  Need to get the real error code
 		return status.Errorf(codes.Canceled, "%s", unpackResult.Reason)
 	}
@@ -94,18 +94,18 @@
 }
 
 func (ap *CoreProxy) RegisterAdapter(ctx context.Context, adapter *voltha.Adapter, deviceTypes *voltha.DeviceTypes) error {
-	logger.Debugw(ctx, "registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
+	logger.Debugw("registering-adapter", log.Fields{"coreTopic": ap.coreTopic, "adapterTopic": ap.adapterTopic})
 	rpc := "Register"
 	topic := kafka.Topic{Name: ap.coreTopic}
 	replyToTopic := ap.getAdapterTopic()
 	args := make([]*kafka.KVArg, 2)
 
 	if adapter.TotalReplicas == 0 && adapter.CurrentReplica != 0 {
-		logger.Fatal(ctx, "totalReplicas can't be 0, since you're here you have at least one")
+		log.Fatal("totalReplicas can't be 0, since you're here you have at least one")
 	}
 
 	if adapter.CurrentReplica == 0 && adapter.TotalReplicas != 0 {
-		logger.Fatal(ctx, "currentReplica can't be 0, it has to start from 1")
+		log.Fatal("currentReplica can't be 0, it has to start from 1")
 	}
 
 	if adapter.CurrentReplica == 0 && adapter.TotalReplicas == 0 {
@@ -117,7 +117,7 @@
 	}
 
 	if adapter.CurrentReplica > adapter.TotalReplicas {
-		logger.Fatalf(ctx, "CurrentReplica (%d) can't be greater than TotalReplicas (%d)",
+		log.Fatalf("CurrentReplica (%d) can't be greater than TotalReplicas (%d)",
 			adapter.CurrentReplica, adapter.TotalReplicas)
 	}
 
@@ -131,12 +131,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(ctx, rpc, &topic, &replyToTopic, true, "", args...)
-	logger.Debugw(ctx, "Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
-	return unPackResponse(ctx, rpc, "", success, result)
+	logger.Debugw("Register-Adapter-response", log.Fields{"replyTopic": replyToTopic, "success": success})
+	return unPackResponse(rpc, "", success, result)
 }
 
 func (ap *CoreProxy) DeviceUpdate(ctx context.Context, device *voltha.Device) error {
-	logger.Debugw(ctx, "DeviceUpdate", log.Fields{"deviceId": device.Id})
+	logger.Debugw("DeviceUpdate", log.Fields{"deviceId": device.Id})
 	rpc := "DeviceUpdate"
 	toTopic := ap.getCoreTopic(device.Id)
 	args := make([]*kafka.KVArg, 1)
@@ -147,12 +147,12 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, device.Id, args...)
-	logger.Debugw(ctx, "DeviceUpdate-response", log.Fields{"deviceId": device.Id, "success": success})
-	return unPackResponse(ctx, rpc, device.Id, success, result)
+	logger.Debugw("DeviceUpdate-response", log.Fields{"deviceId": device.Id, "success": success})
+	return unPackResponse(rpc, device.Id, success, result)
 }
 
 func (ap *CoreProxy) PortCreated(ctx context.Context, deviceId string, port *voltha.Port) error {
-	logger.Debugw(ctx, "PortCreated", log.Fields{"portNo": port.PortNo})
+	logger.Debugw("PortCreated", log.Fields{"portNo": port.PortNo})
 	rpc := "PortCreated"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -171,12 +171,12 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "PortCreated-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
+	logger.Debugw("PortCreated-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) PortsStateUpdate(ctx context.Context, deviceId string, operStatus voltha.OperStatus_Types) error {
-	logger.Debugw(ctx, "PortsStateUpdate", log.Fields{"deviceId": deviceId})
+	logger.Debugw("PortsStateUpdate", log.Fields{"deviceId": deviceId})
 	rpc := "PortsStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -197,12 +197,12 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "PortsStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
+	logger.Debugw("PortsStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DeleteAllPorts(ctx context.Context, deviceId string) error {
-	logger.Debugw(ctx, "DeleteAllPorts", log.Fields{"deviceId": deviceId})
+	logger.Debugw("DeleteAllPorts", log.Fields{"deviceId": deviceId})
 	rpc := "DeleteAllPorts"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -218,13 +218,13 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
+	logger.Debugw("DeleteAllPorts-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DeviceStateUpdate(ctx context.Context, deviceId string,
 	connStatus voltha.ConnectStatus_Types, operStatus voltha.OperStatus_Types) error {
-	logger.Debugw(ctx, "DeviceStateUpdate", log.Fields{"deviceId": deviceId})
+	logger.Debugw("DeviceStateUpdate", log.Fields{"deviceId": deviceId})
 	rpc := "DeviceStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -249,13 +249,13 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "DeviceStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
+	logger.Debugw("DeviceStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) ChildDeviceDetected(ctx context.Context, parentDeviceId string, parentPortNo int,
 	childDeviceType string, channelId int, vendorId string, serialNumber string, onuId int64) (*voltha.Device, error) {
-	logger.Debugw(ctx, "ChildDeviceDetected", log.Fields{"pDeviceId": parentDeviceId, "channelId": channelId})
+	logger.Debugw("ChildDeviceDetected", log.Fields{"pDeviceId": parentDeviceId, "channelId": channelId})
 	rpc := "ChildDeviceDetected"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -300,12 +300,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw("ChildDeviceDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
 		volthaDevice := &voltha.Device{}
 		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 			return nil, status.Error(codes.InvalidArgument, err.Error())
 		}
 		return volthaDevice, nil
@@ -313,17 +313,17 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		logger.Debugw(ctx, "ChildDeviceDetected-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		logger.Debugw("ChildDeviceDetected-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
 
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(unpackResult.Code), unpackResult.Reason)
 	}
 
 }
 
 func (ap *CoreProxy) ChildDevicesLost(ctx context.Context, parentDeviceId string) error {
-	logger.Debugw(ctx, "ChildDevicesLost", log.Fields{"pDeviceId": parentDeviceId})
+	logger.Debugw("ChildDevicesLost", log.Fields{"pDeviceId": parentDeviceId})
 	rpc := "ChildDevicesLost"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -338,12 +338,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
+	logger.Debugw("ChildDevicesLost-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(rpc, parentDeviceId, success, result)
 }
 
 func (ap *CoreProxy) ChildDevicesDetected(ctx context.Context, parentDeviceId string) error {
-	logger.Debugw(ctx, "ChildDevicesDetected", log.Fields{"pDeviceId": parentDeviceId})
+	logger.Debugw("ChildDevicesDetected", log.Fields{"pDeviceId": parentDeviceId})
 	rpc := "ChildDevicesDetected"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -358,12 +358,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
+	logger.Debugw("ChildDevicesDetected-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(rpc, parentDeviceId, success, result)
 }
 
 func (ap *CoreProxy) GetDevice(ctx context.Context, parentDeviceId string, deviceId string) (*voltha.Device, error) {
-	logger.Debugw(ctx, "GetDevice", log.Fields{"deviceId": deviceId})
+	logger.Debugw("GetDevice", log.Fields{"deviceId": deviceId})
 	rpc := "GetDevice"
 
 	toTopic := ap.getCoreTopic(parentDeviceId)
@@ -377,12 +377,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw("GetDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
 		volthaDevice := &voltha.Device{}
 		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 			return nil, status.Error(codes.InvalidArgument, err.Error())
 		}
 		return volthaDevice, nil
@@ -390,16 +390,16 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		logger.Debugw(ctx, "GetDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		logger.Debugw("GetDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
 		// TODO:  Need to get the real error code
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(unpackResult.Code), unpackResult.Reason)
 	}
 }
 
 func (ap *CoreProxy) GetChildDevice(ctx context.Context, parentDeviceId string, kwargs map[string]interface{}) (*voltha.Device, error) {
-	logger.Debugw(ctx, "GetChildDevice", log.Fields{"parentDeviceId": parentDeviceId, "kwargs": kwargs})
+	logger.Debugw("GetChildDevice", log.Fields{"parentDeviceId": parentDeviceId, "kwargs": kwargs})
 	rpc := "GetChildDevice"
 
 	toTopic := ap.getCoreTopic(parentDeviceId)
@@ -437,12 +437,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw("GetChildDevice-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
 		volthaDevice := &voltha.Device{}
 		if err := ptypes.UnmarshalAny(result, volthaDevice); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 			return nil, status.Error(codes.InvalidArgument, err.Error())
 		}
 		return volthaDevice, nil
@@ -450,16 +450,16 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		logger.Debugw(ctx, "GetChildDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		logger.Debugw("GetChildDevice-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
 
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(unpackResult.Code), unpackResult.Reason)
 	}
 }
 
 func (ap *CoreProxy) GetChildDevices(ctx context.Context, parentDeviceId string) (*voltha.Devices, error) {
-	logger.Debugw(ctx, "GetChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
+	logger.Debugw("GetChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
 	rpc := "GetChildDevices"
 
 	toTopic := ap.getCoreTopic(parentDeviceId)
@@ -473,12 +473,12 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "GetChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	logger.Debugw("GetChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
 
 	if success {
 		volthaDevices := &voltha.Devices{}
 		if err := ptypes.UnmarshalAny(result, volthaDevices); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 			return nil, status.Error(codes.InvalidArgument, err.Error())
 		}
 		return volthaDevices, nil
@@ -486,16 +486,16 @@
 		unpackResult := &ic.Error{}
 		var err error
 		if err = ptypes.UnmarshalAny(result, unpackResult); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 		}
-		logger.Debugw(ctx, "GetChildDevices-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
+		logger.Debugw("GetChildDevices-return", log.Fields{"deviceid": parentDeviceId, "success": success, "error": err})
 
-		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(ctx, unpackResult.Code), unpackResult.Reason)
+		return nil, status.Error(ICProxyErrorCodeToGrpcErrorCode(unpackResult.Code), unpackResult.Reason)
 	}
 }
 
 func (ap *CoreProxy) SendPacketIn(ctx context.Context, deviceId string, port uint32, pktPayload []byte) error {
-	logger.Debugw(ctx, "SendPacketIn", log.Fields{"deviceId": deviceId, "port": port, "pktPayload": pktPayload})
+	logger.Debugw("SendPacketIn", log.Fields{"deviceId": deviceId, "port": port, "pktPayload": pktPayload})
 	rpc := "PacketIn"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -519,12 +519,12 @@
 		Value: pkt,
 	}
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "SendPacketIn-response", log.Fields{"pDeviceId": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
+	logger.Debugw("SendPacketIn-response", log.Fields{"pDeviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DeviceReasonUpdate(ctx context.Context, deviceId string, deviceReason string) error {
-	logger.Debugw(ctx, "DeviceReasonUpdate", log.Fields{"deviceId": deviceId, "deviceReason": deviceReason})
+	logger.Debugw("DeviceReasonUpdate", log.Fields{"deviceId": deviceId, "deviceReason": deviceReason})
 	rpc := "DeviceReasonUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -543,12 +543,12 @@
 		Value: reason,
 	}
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "DeviceReason-response", log.Fields{"pDeviceId": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
+	logger.Debugw("DeviceReason-response", log.Fields{"pDeviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
 }
 
 func (ap *CoreProxy) DevicePMConfigUpdate(ctx context.Context, pmConfigs *voltha.PmConfigs) error {
-	logger.Debugw(ctx, "DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
+	logger.Debugw("DevicePMConfigUpdate", log.Fields{"pmConfigs": pmConfigs})
 	rpc := "DevicePMConfigUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -561,12 +561,12 @@
 		Value: pmConfigs,
 	}
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, pmConfigs.Id, args...)
-	logger.Debugw(ctx, "DevicePMConfigUpdate-response", log.Fields{"pDeviceId": pmConfigs.Id, "success": success})
-	return unPackResponse(ctx, rpc, pmConfigs.Id, success, result)
+	logger.Debugw("DevicePMConfigUpdate-response", log.Fields{"pDeviceId": pmConfigs.Id, "success": success})
+	return unPackResponse(rpc, pmConfigs.Id, success, result)
 }
 
 func (ap *CoreProxy) ReconcileChildDevices(ctx context.Context, parentDeviceId string) error {
-	logger.Debugw(ctx, "ReconcileChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
+	logger.Debugw("ReconcileChildDevices", log.Fields{"parentDeviceId": parentDeviceId})
 	rpc := "ReconcileChildDevices"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -578,13 +578,13 @@
 	}
 
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, parentDeviceId, args...)
-	logger.Debugw(ctx, "ReconcileChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
-	return unPackResponse(ctx, rpc, parentDeviceId, success, result)
+	logger.Debugw("ReconcileChildDevices-response", log.Fields{"pDeviceId": parentDeviceId, "success": success})
+	return unPackResponse(rpc, parentDeviceId, success, result)
 }
 
 func (ap *CoreProxy) PortStateUpdate(ctx context.Context, deviceId string, pType voltha.Port_PortType, portNum uint32,
 	operStatus voltha.OperStatus_Types) error {
-	logger.Debugw(ctx, "PortStateUpdate", log.Fields{"deviceId": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
+	logger.Debugw("PortStateUpdate", log.Fields{"deviceId": deviceId, "portType": pType, "portNo": portNum, "operation_status": operStatus})
 	rpc := "PortStateUpdate"
 	// Use a device specific topic to send the request.  The adapter handling the device creates a device
 	// specific topic
@@ -615,6 +615,6 @@
 	// Use a device specific topic as we are the only adaptercore handling requests for this device
 	replyToTopic := ap.getAdapterTopic()
 	success, result := ap.kafkaICProxy.InvokeRPC(context.Background(), rpc, &toTopic, &replyToTopic, true, deviceId, args...)
-	logger.Debugw(ctx, "PortStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
-	return unPackResponse(ctx, rpc, deviceId, success, result)
+	logger.Debugw("PortStateUpdate-response", log.Fields{"deviceId": deviceId, "success": success})
+	return unPackResponse(rpc, deviceId, success, result)
 }
diff --git a/pkg/adapters/common/core_proxy_test.go b/pkg/adapters/common/core_proxy_test.go
index 2fb4df7..a1b4290 100644
--- a/pkg/adapters/common/core_proxy_test.go
+++ b/pkg/adapters/common/core_proxy_test.go
@@ -45,7 +45,7 @@
 		},
 	}
 
-	proxy := NewCoreProxy(context.Background(), &mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
+	proxy := NewCoreProxy(&mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
 
 	adapter := &voltha.Adapter{
 		Id:      "testAdapter",
@@ -88,7 +88,7 @@
 		},
 	}
 
-	proxy := NewCoreProxy(context.Background(), &mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
+	proxy := NewCoreProxy(&mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
 
 	adapter := &voltha.Adapter{
 		Id:             "testAdapter",
@@ -128,7 +128,7 @@
 		},
 	}
 
-	proxy := NewCoreProxy(context.Background(), &mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
+	proxy := NewCoreProxy(&mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
 
 	kwargs := make(map[string]interface{})
 	kwargs["serial_number"] = "TEST00000000001"
@@ -159,7 +159,7 @@
 		},
 	}
 
-	proxy := NewCoreProxy(context.Background(), &mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
+	proxy := NewCoreProxy(&mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
 
 	kwargs := make(map[string]interface{})
 	kwargs["onu_id"] = uint32(1234)
@@ -190,7 +190,7 @@
 		},
 	}
 
-	proxy := NewCoreProxy(context.Background(), &mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
+	proxy := NewCoreProxy(&mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
 
 	kwargs := make(map[string]interface{})
 	kwargs["onu_id"] = uint32(1234)
@@ -213,7 +213,7 @@
 		},
 	}
 
-	proxy := NewCoreProxy(context.Background(), &mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
+	proxy := NewCoreProxy(&mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
 
 	kwargs := make(map[string]interface{})
 	kwargs["onu_id"] = uint32(1234)
@@ -241,7 +241,7 @@
 		},
 	}
 
-	proxy := NewCoreProxy(context.Background(), &mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
+	proxy := NewCoreProxy(&mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
 
 	parentDeviceId := "aabbcc"
 	devices, error := proxy.GetChildDevices(context.TODO(), parentDeviceId)
@@ -268,7 +268,7 @@
 		},
 	}
 
-	proxy := NewCoreProxy(context.Background(), &mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
+	proxy := NewCoreProxy(&mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
 
 	parentDeviceId := "aabbcc"
 	devices, error := proxy.GetChildDevices(context.TODO(), parentDeviceId)
@@ -288,7 +288,7 @@
 		},
 	}
 
-	proxy := NewCoreProxy(context.Background(), &mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
+	proxy := NewCoreProxy(&mockKafkaIcProxy, "testAdapterTopic", "testCoreTopic")
 
 	parentDeviceId := "aabbcc"
 	devices, error := proxy.GetChildDevices(context.TODO(), parentDeviceId)
diff --git a/pkg/adapters/common/events_proxy.go b/pkg/adapters/common/events_proxy.go
index b79bafe..da9c9eb 100644
--- a/pkg/adapters/common/events_proxy.go
+++ b/pkg/adapters/common/events_proxy.go
@@ -17,7 +17,6 @@
 package common
 
 import (
-	"context"
 	"errors"
 	"fmt"
 	"strconv"
@@ -97,9 +96,9 @@
 }
 
 /* Send out device events*/
-func (ep *EventProxy) SendDeviceEvent(ctx context.Context, deviceEvent *voltha.DeviceEvent, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
+func (ep *EventProxy) SendDeviceEvent(deviceEvent *voltha.DeviceEvent, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
 	if deviceEvent == nil {
-		logger.Error(ctx, "Recieved empty device event")
+		logger.Error("Recieved empty device event")
 		return errors.New("Device event nil")
 	}
 	var event voltha.Event
@@ -110,11 +109,11 @@
 		return err
 	}
 	event.EventType = &de
-	if err := ep.sendEvent(ctx, &event); err != nil {
-		logger.Errorw(ctx, "Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
+	if err := ep.sendEvent(&event); err != nil {
+		logger.Errorw("Failed to send device event to KAFKA bus", log.Fields{"device-event": deviceEvent})
 		return err
 	}
-	logger.Infow(ctx, "Successfully sent device event KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+	logger.Infow("Successfully sent device event KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
 		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
 		"ReportedTs": event.Header.ReportedTs, "ResourceId": deviceEvent.ResourceId, "Context": deviceEvent.Context,
 		"DeviceEventName": deviceEvent.DeviceEventName})
@@ -124,9 +123,9 @@
 }
 
 // SendKpiEvent is to send kpi events to voltha.event topic
-func (ep *EventProxy) SendKpiEvent(ctx context.Context, id string, kpiEvent *voltha.KpiEvent2, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
+func (ep *EventProxy) SendKpiEvent(id string, kpiEvent *voltha.KpiEvent2, category adapterif.EventCategory, subCategory adapterif.EventSubCategory, raisedTs int64) error {
 	if kpiEvent == nil {
-		logger.Error(ctx, "Recieved empty kpi event")
+		logger.Error("Recieved empty kpi event")
 		return errors.New("KPI event nil")
 	}
 	var event voltha.Event
@@ -137,11 +136,11 @@
 		return err
 	}
 	event.EventType = &de
-	if err := ep.sendEvent(ctx, &event); err != nil {
-		logger.Errorw(ctx, "Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
+	if err := ep.sendEvent(&event); err != nil {
+		logger.Errorw("Failed to send kpi event to KAFKA bus", log.Fields{"device-event": kpiEvent})
 		return err
 	}
-	logger.Infow(ctx, "Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
+	logger.Infow("Successfully sent kpi event to KAFKA", log.Fields{"Id": event.Header.Id, "Category": event.Header.Category,
 		"SubCategory": event.Header.SubCategory, "Type": event.Header.Type, "TypeVersion": event.Header.TypeVersion,
 		"ReportedTs": event.Header.ReportedTs, "KpiEventName": "STATS_EVENT"})
 
@@ -151,11 +150,11 @@
 
 /* TODO: Send out KPI events*/
 
-func (ep *EventProxy) sendEvent(ctx context.Context, event *voltha.Event) error {
-	if err := ep.kafkaClient.Send(ctx, event, &ep.eventTopic); err != nil {
+func (ep *EventProxy) sendEvent(event *voltha.Event) error {
+	if err := ep.kafkaClient.Send(event, &ep.eventTopic); err != nil {
 		return err
 	}
-	logger.Debugw(ctx, "Sent event to kafka", log.Fields{"event": event})
+	logger.Debugw("Sent event to kafka", log.Fields{"event": event})
 
 	return nil
 }
diff --git a/pkg/adapters/common/request_handler.go b/pkg/adapters/common/request_handler.go
index a92ed51..62d8cdd 100644
--- a/pkg/adapters/common/request_handler.go
+++ b/pkg/adapters/common/request_handler.go
@@ -16,7 +16,6 @@
 package common
 
 import (
-	"context"
 	"errors"
 
 	"github.com/golang/protobuf/ptypes"
@@ -59,9 +58,9 @@
 	return nil, nil
 }
 
-func (rhp *RequestHandlerProxy) Adopt_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Adopt_device(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -72,38 +71,38 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
 
-	logger.Debugw(ctx, "Adopt_device", log.Fields{"deviceId": device.Id})
+	logger.Debugw("Adopt_device", log.Fields{"deviceId": device.Id})
 
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 
 	//Invoke the adopt device on the adapter
-	if err := rhp.adapter.Adopt_device(ctx, device); err != nil {
+	if err := rhp.adapter.Adopt_device(device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Reconcile_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Reconcile_device(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -115,17 +114,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -134,7 +133,7 @@
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 
 	//Invoke the reconcile device API on the adapter
-	if err := rhp.adapter.Reconcile_device(ctx, device); err != nil {
+	if err := rhp.adapter.Reconcile_device(device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
@@ -144,9 +143,9 @@
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Disable_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Disable_device(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -158,17 +157,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -176,15 +175,15 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 	//Invoke the Disable_device API on the adapter
-	if err := rhp.adapter.Disable_device(ctx, device); err != nil {
+	if err := rhp.adapter.Disable_device(device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Reenable_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Reenable_device(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -196,17 +195,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -214,15 +213,15 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 	//Invoke the Reenable_device API on the adapter
-	if err := rhp.adapter.Reenable_device(ctx, device); err != nil {
+	if err := rhp.adapter.Reenable_device(device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Reboot_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Reboot_device(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -234,17 +233,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -252,7 +251,7 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 	//Invoke the Reboot_device API on the adapter
-	if err := rhp.adapter.Reboot_device(ctx, device); err != nil {
+	if err := rhp.adapter.Reboot_device(device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
@@ -263,9 +262,9 @@
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Delete_device(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Delete_device(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -277,17 +276,17 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return nil, err
 			}
 		}
@@ -295,7 +294,7 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(device.Id, fromTopic.Val)
 	//Invoke the delete_device API on the adapter
-	if err := rhp.adapter.Delete_device(ctx, device); err != nil {
+	if err := rhp.adapter.Delete_device(device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
@@ -305,10 +304,10 @@
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Update_flows_bulk(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debug(ctx, "Update_flows_bulk")
+func (rhp *RequestHandlerProxy) Update_flows_bulk(args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug("Update_flows_bulk")
 	if len(args) < 5 {
-		logger.Warn(ctx, "Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("Update_flows_bulk-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -321,43 +320,43 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flows":
 			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-flows", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
 				return nil, err
 			}
 		case "groups":
 			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-groups", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flow_metadata":
 			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-metadata", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw(ctx, "Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
+	logger.Debugw("Update_flows_bulk", log.Fields{"flows": flows, "groups": groups})
 	//Invoke the bulk flow update API of the adapter
-	if err := rhp.adapter.Update_flows_bulk(ctx, device, flows, groups, flowMetadata); err != nil {
+	if err := rhp.adapter.Update_flows_bulk(device, flows, groups, flowMetadata); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Update_flows_incrementally(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debug(ctx, "Update_flows_incrementally")
+func (rhp *RequestHandlerProxy) Update_flows_incrementally(args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug("Update_flows_incrementally")
 	if len(args) < 5 {
-		logger.Warn(ctx, "Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("Update_flows_incrementally-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -370,43 +369,43 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flow_changes":
 			if err := ptypes.UnmarshalAny(arg.Value, flows); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-flows", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-flows", log.Fields{"error": err})
 				return nil, err
 			}
 		case "group_changes":
 			if err := ptypes.UnmarshalAny(arg.Value, groups); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-groups", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-groups", log.Fields{"error": err})
 				return nil, err
 			}
 		case "flow_metadata":
 			if err := ptypes.UnmarshalAny(arg.Value, flowMetadata); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-metadata", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-metadata", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw(ctx, "Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
+	logger.Debugw("Update_flows_incrementally", log.Fields{"flows": flows, "groups": groups})
 	//Invoke the incremental flow update API of the adapter
-	if err := rhp.adapter.Update_flows_incrementally(ctx, device, flows, groups, flowMetadata); err != nil {
+	if err := rhp.adapter.Update_flows_incrementally(device, flows, groups, flowMetadata); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Update_pm_config(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debug(ctx, "Update_pm_config")
+func (rhp *RequestHandlerProxy) Update_pm_config(args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debug("Update_pm_config")
 	if len(args) < 2 {
-		logger.Warn(ctx, "Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("Update_pm_config-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -417,33 +416,33 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "pm_configs":
 			if err := ptypes.UnmarshalAny(arg.Value, pmConfigs); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-pm-configs", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-pm-configs", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw(ctx, "Update_pm_config", log.Fields{"deviceId": device.Id, "pmConfigs": pmConfigs})
+	logger.Debugw("Update_pm_config", log.Fields{"deviceId": device.Id, "pmConfigs": pmConfigs})
 	//Invoke the pm config update API of the adapter
-	if err := rhp.adapter.Update_pm_config(ctx, device, pmConfigs); err != nil {
+	if err := rhp.adapter.Update_pm_config(device, pmConfigs); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Receive_packet_out(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
-	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"args": args})
+func (rhp *RequestHandlerProxy) Receive_packet_out(args []*ic.Argument) (*empty.Empty, error) {
+	logger.Debugw("Receive_packet_out", log.Fields{"args": args})
 	if len(args) < 3 {
-		logger.Warn(ctx, "Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("Receive_packet_out-invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -455,29 +454,29 @@
 		switch arg.Key {
 		case "deviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-deviceId", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-deviceId", log.Fields{"error": err})
 				return nil, err
 			}
 		case "outPort":
 			if err := ptypes.UnmarshalAny(arg.Value, egressPort); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-egressPort", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-egressPort", log.Fields{"error": err})
 				return nil, err
 			}
 		case "packet":
 			if err := ptypes.UnmarshalAny(arg.Value, packet); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-packet", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-packet", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw(ctx, "Receive_packet_out", log.Fields{"deviceId": deviceId.Val, "outPort": egressPort, "packet": packet})
+	logger.Debugw("Receive_packet_out", log.Fields{"deviceId": deviceId.Val, "outPort": egressPort, "packet": packet})
 	//Invoke the adopt device on the adapter
-	if err := rhp.adapter.Receive_packet_out(ctx, deviceId.Val, int(egressPort.Val), packet); err != nil {
+	if err := rhp.adapter.Receive_packet_out(deviceId.Val, int(egressPort.Val), packet); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return new(empty.Empty), nil
@@ -491,9 +490,9 @@
 	return new(empty.Empty), nil
 }
 
-func (rhp *RequestHandlerProxy) Get_ofp_device_info(ctx context.Context, args []*ic.Argument) (*ic.SwitchCapability, error) {
+func (rhp *RequestHandlerProxy) Get_ofp_device_info(args []*ic.Argument) (*ic.SwitchCapability, error) {
 	if len(args) < 2 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -503,31 +502,31 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
 
-	logger.Debugw(ctx, "Get_ofp_device_info", log.Fields{"deviceId": device.Id})
+	logger.Debugw("Get_ofp_device_info", log.Fields{"deviceId": device.Id})
 
 	var cap *ic.SwitchCapability
 	var err error
-	if cap, err = rhp.adapter.Get_ofp_device_info(ctx, device); err != nil {
+	if cap, err = rhp.adapter.Get_ofp_device_info(device); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
-	logger.Debugw(ctx, "Get_ofp_device_info", log.Fields{"cap": cap})
+	logger.Debugw("Get_ofp_device_info", log.Fields{"cap": cap})
 	return cap, nil
 }
 
-func (rhp *RequestHandlerProxy) Process_inter_adapter_message(ctx context.Context, args []*ic.Argument) (*empty.Empty, error) {
+func (rhp *RequestHandlerProxy) Process_inter_adapter_message(args []*ic.Argument) (*empty.Empty, error) {
 	if len(args) < 2 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -537,21 +536,21 @@
 		switch arg.Key {
 		case "msg":
 			if err := ptypes.UnmarshalAny(arg.Value, iaMsg); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case kafka.TransactionKey:
 			if err := ptypes.UnmarshalAny(arg.Value, transactionID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
 
-	logger.Debugw(ctx, "Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
+	logger.Debugw("Process_inter_adapter_message", log.Fields{"msgId": iaMsg.Header.Id})
 
 	//Invoke the inter adapter API on the handler
-	if err := rhp.adapter.Process_inter_adapter_message(ctx, iaMsg); err != nil {
+	if err := rhp.adapter.Process_inter_adapter_message(iaMsg); err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 
@@ -578,30 +577,30 @@
 	return &voltha.ImageDownload{}, nil
 }
 
-func (rhp *RequestHandlerProxy) Enable_port(ctx context.Context, args []*ic.Argument) error {
-	logger.Debugw(ctx, "enable_port", log.Fields{"args": args})
-	deviceId, port, err := rhp.getEnableDisableParams(ctx, args)
+func (rhp *RequestHandlerProxy) Enable_port(args []*ic.Argument) error {
+	logger.Debugw("enable_port", log.Fields{"args": args})
+	deviceId, port, err := rhp.getEnableDisableParams(args)
 	if err != nil {
-		logger.Warnw(ctx, "enable_port", log.Fields{"args": args, "deviceId": deviceId, "port": port})
+		logger.Warnw("enable_port", log.Fields{"args": args, "deviceId": deviceId, "port": port})
 		return err
 	}
-	return rhp.adapter.Enable_port(ctx, deviceId, port)
+	return rhp.adapter.Enable_port(deviceId, port)
 }
 
-func (rhp *RequestHandlerProxy) Disable_port(ctx context.Context, args []*ic.Argument) error {
-	logger.Debugw(ctx, "disable_port", log.Fields{"args": args})
-	deviceId, port, err := rhp.getEnableDisableParams(ctx, args)
+func (rhp *RequestHandlerProxy) Disable_port(args []*ic.Argument) error {
+	logger.Debugw("disable_port", log.Fields{"args": args})
+	deviceId, port, err := rhp.getEnableDisableParams(args)
 	if err != nil {
-		logger.Warnw(ctx, "disable_port", log.Fields{"args": args, "deviceId": deviceId, "port": port})
+		logger.Warnw("disable_port", log.Fields{"args": args, "deviceId": deviceId, "port": port})
 		return err
 	}
-	return rhp.adapter.Disable_port(ctx, deviceId, port)
+	return rhp.adapter.Disable_port(deviceId, port)
 }
 
-func (rhp *RequestHandlerProxy) getEnableDisableParams(ctx context.Context, args []*ic.Argument) (string, *voltha.Port, error) {
-	logger.Debugw(ctx, "getEnableDisableParams", log.Fields{"args": args})
+func (rhp *RequestHandlerProxy) getEnableDisableParams(args []*ic.Argument) (string, *voltha.Port, error) {
+	logger.Debugw("getEnableDisableParams", log.Fields{"args": args})
 	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		return "", nil, errors.New("invalid-number-of-args")
 	}
 	deviceId := &ic.StrType{}
@@ -610,12 +609,12 @@
 		switch arg.Key {
 		case "deviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, deviceId); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return "", nil, err
 			}
 		case "port":
 			if err := ptypes.UnmarshalAny(arg.Value, port); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-port", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-port", log.Fields{"error": err})
 				return "", nil, err
 			}
 		}
@@ -623,9 +622,9 @@
 	return deviceId.Val, port, nil
 }
 
-func (rhp *RequestHandlerProxy) Child_device_lost(ctx context.Context, args []*ic.Argument) error {
+func (rhp *RequestHandlerProxy) Child_device_lost(args []*ic.Argument) error {
 	if len(args) < 4 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		return errors.New("invalid-number-of-args")
 	}
 
@@ -637,22 +636,22 @@
 		switch arg.Key {
 		case "pDeviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, pDeviceId); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-parent-deviceId", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-parent-deviceId", log.Fields{"error": err})
 				return err
 			}
 		case "pPortNo":
 			if err := ptypes.UnmarshalAny(arg.Value, pPortNo); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-port", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-port", log.Fields{"error": err})
 				return err
 			}
 		case "onuID":
 			if err := ptypes.UnmarshalAny(arg.Value, onuID); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-transaction-ID", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-transaction-ID", log.Fields{"error": err})
 				return err
 			}
 		case kafka.FromTopic:
 			if err := ptypes.UnmarshalAny(arg.Value, fromTopic); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-from-topic", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-from-topic", log.Fields{"error": err})
 				return err
 			}
 		}
@@ -660,15 +659,15 @@
 	//Update the core reference for that device
 	rhp.coreProxy.UpdateCoreReference(pDeviceId.Val, fromTopic.Val)
 	//Invoke the Child_device_lost API on the adapter
-	if err := rhp.adapter.Child_device_lost(ctx, pDeviceId.Val, uint32(pPortNo.Val), uint32(onuID.Val)); err != nil {
+	if err := rhp.adapter.Child_device_lost(pDeviceId.Val, uint32(pPortNo.Val), uint32(onuID.Val)); err != nil {
 		return status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return nil
 }
 
-func (rhp *RequestHandlerProxy) Start_omci_test(ctx context.Context, args []*ic.Argument) (*ic.TestResponse, error) {
+func (rhp *RequestHandlerProxy) Start_omci_test(args []*ic.Argument) (*ic.TestResponse, error) {
 	if len(args) < 2 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		err := errors.New("invalid-number-of-args")
 		return nil, err
 	}
@@ -682,26 +681,26 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "omcitestrequest":
 			if err := ptypes.UnmarshalAny(arg.Value, request); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-omcitestrequest", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-omcitestrequest", log.Fields{"error": err})
 				return nil, err
 			}
 		}
 	}
-	logger.Debugw(ctx, "Start_omci_test", log.Fields{"device-id": device.Id, "req": request})
-	result, err := rhp.adapter.Start_omci_test(ctx, device, request)
+	logger.Debugw("Start_omci_test", log.Fields{"device-id": device.Id, "req": request})
+	result, err := rhp.adapter.Start_omci_test(device, request)
 	if err != nil {
 		return nil, status.Errorf(codes.NotFound, "%s", err.Error())
 	}
 	return result, nil
 }
-func (rhp *RequestHandlerProxy) Get_ext_value(ctx context.Context, args []*ic.Argument) (*voltha.ReturnValues, error) {
+func (rhp *RequestHandlerProxy) Get_ext_value(args []*ic.Argument) (*voltha.ReturnValues, error) {
 	if len(args) < 3 {
-		logger.Warn(ctx, "invalid-number-of-args", log.Fields{"args": args})
+		logger.Warn("invalid-number-of-args", log.Fields{"args": args})
 		return nil, errors.New("invalid-number-of-args")
 	}
 
@@ -712,24 +711,24 @@
 		switch arg.Key {
 		case "device":
 			if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-device", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-device", log.Fields{"error": err})
 				return nil, err
 			}
 		case "pDeviceId":
 			if err := ptypes.UnmarshalAny(arg.Value, pDeviceId); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-parent-deviceId", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-parent-deviceId", log.Fields{"error": err})
 				return nil, err
 			}
 		case "valuetype":
 			if err := ptypes.UnmarshalAny(arg.Value, valuetype); err != nil {
-				logger.Warnw(ctx, "cannot-unmarshal-valuetype", log.Fields{"error": err})
+				logger.Warnw("cannot-unmarshal-valuetype", log.Fields{"error": err})
 				return nil, err
 			}
 		default:
-			logger.Warnw(ctx, "key-not-found", log.Fields{"arg.Key": arg.Key})
+			logger.Warnw("key-not-found", log.Fields{"arg.Key": arg.Key})
 		}
 	}
 
 	//Invoke the Get_value API on the adapter
-	return rhp.adapter.Get_ext_value(ctx, pDeviceId.Val, device, voltha.ValueType_Type(valuetype.Val))
+	return rhp.adapter.Get_ext_value(pDeviceId.Val, device, voltha.ValueType_Type(valuetype.Val))
 }
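Note (illustrative only, not part of this revert): every request handler above follows the same unmarshal-by-key walk over the kafka RPC arguments. The sketch below shows that pattern with the context parameter removed; the helper name extractDevice is hypothetical, and error handling is reduced to returning the unmarshalling error.

package common

import (
	"errors"

	"github.com/golang/protobuf/ptypes"
	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
	"github.com/opencord/voltha-protos/v3/go/voltha"
)

// extractDevice (hypothetical helper) mirrors the handlers above: walk the
// RPC argument list and unmarshal the entry whose key is "device".
func extractDevice(args []*ic.Argument) (*voltha.Device, error) {
	device := &voltha.Device{}
	for _, arg := range args {
		if arg.Key != "device" {
			continue
		}
		if err := ptypes.UnmarshalAny(arg.Value, device); err != nil {
			return nil, err
		}
		return device, nil
	}
	return nil, errors.New("device-argument-not-found")
}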
diff --git a/pkg/adapters/common/utils.go b/pkg/adapters/common/utils.go
index 3d91119..94e8bd6 100644
--- a/pkg/adapters/common/utils.go
+++ b/pkg/adapters/common/utils.go
@@ -16,7 +16,6 @@
 package common
 
 import (
-	"context"
 	"fmt"
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
@@ -76,7 +75,7 @@
 	return string(b)
 }
 
-func ICProxyErrorCodeToGrpcErrorCode(ctx context.Context, icErr ic.ErrorCodeCodes) codes.Code {
+func ICProxyErrorCodeToGrpcErrorCode(icErr ic.ErrorCodeCodes) codes.Code {
 	switch icErr {
 	case ic.ErrorCode_INVALID_PARAMETERS:
 		return codes.InvalidArgument
@@ -85,7 +84,7 @@
 	case ic.ErrorCode_DEADLINE_EXCEEDED:
 		return codes.DeadlineExceeded
 	default:
-		logger.Warnw(ctx, "cannnot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
+		logger.Warnw("cannnot-map-ic-error-code-to-grpc-error-code", log.Fields{"err": icErr})
 		return codes.Internal
 	}
 }
diff --git a/pkg/adapters/common/utils_test.go b/pkg/adapters/common/utils_test.go
index a93dd2f..87594fd 100644
--- a/pkg/adapters/common/utils_test.go
+++ b/pkg/adapters/common/utils_test.go
@@ -16,7 +16,6 @@
 package common
 
 import (
-	"context"
 	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
 	"github.com/stretchr/testify/assert"
 	"google.golang.org/grpc/codes"
@@ -73,12 +72,12 @@
 }
 
 func TestICProxyErrorCodeToGrpcErrorCode(t *testing.T) {
-	unsupported := ICProxyErrorCodeToGrpcErrorCode(context.Background(), ic.ErrorCode_UNSUPPORTED_REQUEST)
+	unsupported := ICProxyErrorCodeToGrpcErrorCode(ic.ErrorCode_UNSUPPORTED_REQUEST)
 	assert.Equal(t, unsupported, codes.Unavailable)
 
-	invalid := ICProxyErrorCodeToGrpcErrorCode(context.Background(), ic.ErrorCode_INVALID_PARAMETERS)
+	invalid := ICProxyErrorCodeToGrpcErrorCode(ic.ErrorCode_INVALID_PARAMETERS)
 	assert.Equal(t, invalid, codes.InvalidArgument)
 
-	timeout := ICProxyErrorCodeToGrpcErrorCode(context.Background(), ic.ErrorCode_DEADLINE_EXCEEDED)
+	timeout := ICProxyErrorCodeToGrpcErrorCode(ic.ErrorCode_DEADLINE_EXCEEDED)
 	assert.Equal(t, timeout, codes.DeadlineExceeded)
 }
diff --git a/pkg/adapters/iAdapter.go b/pkg/adapters/iAdapter.go
index ce0b791..1e81890 100644
--- a/pkg/adapters/iAdapter.go
+++ b/pkg/adapters/iAdapter.go
@@ -16,7 +16,6 @@
 package adapters
 
 import (
-	"context"
 	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
 	"github.com/opencord/voltha-protos/v3/go/openflow_13"
 	"github.com/opencord/voltha-protos/v3/go/voltha"
@@ -24,34 +23,34 @@
 
 //IAdapter represents the set of APIs a voltha adapter has to support.
 type IAdapter interface {
-	Adapter_descriptor(ctx context.Context) error
-	Device_types(ctx context.Context) (*voltha.DeviceTypes, error)
-	Health(ctx context.Context) (*voltha.HealthStatus, error)
-	Adopt_device(ctx context.Context, device *voltha.Device) error
-	Reconcile_device(ctx context.Context, device *voltha.Device) error
-	Abandon_device(ctx context.Context, device *voltha.Device) error
-	Disable_device(ctx context.Context, device *voltha.Device) error
-	Reenable_device(ctx context.Context, device *voltha.Device) error
-	Reboot_device(ctx context.Context, device *voltha.Device) error
-	Self_test_device(ctx context.Context, device *voltha.Device) error
-	Delete_device(ctx context.Context, device *voltha.Device) error
-	Get_device_details(ctx context.Context, device *voltha.Device) error
-	Update_flows_bulk(ctx context.Context, device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error
-	Update_flows_incrementally(ctx context.Context, device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error
-	Update_pm_config(ctx context.Context, device *voltha.Device, pm_configs *voltha.PmConfigs) error
-	Receive_packet_out(ctx context.Context, deviceId string, egress_port_no int, msg *openflow_13.OfpPacketOut) error
-	Suppress_event(ctx context.Context, filter *voltha.EventFilter) error
-	Unsuppress_event(ctx context.Context, filter *voltha.EventFilter) error
-	Get_ofp_device_info(ctx context.Context, device *voltha.Device) (*ic.SwitchCapability, error)
-	Process_inter_adapter_message(ctx context.Context, msg *ic.InterAdapterMessage) error
-	Download_image(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Get_image_download_status(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Cancel_image_download(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Activate_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Revert_image_update(ctx context.Context, device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
-	Enable_port(ctx context.Context, deviceId string, port *voltha.Port) error
-	Disable_port(ctx context.Context, deviceId string, port *voltha.Port) error
-	Child_device_lost(ctx context.Context, parentDeviceId string, parentPortNo uint32, onuID uint32) error
-	Start_omci_test(ctx context.Context, device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error)
-	Get_ext_value(ctx context.Context, deviceId string, device *voltha.Device, valueflag voltha.ValueType_Type) (*voltha.ReturnValues, error)
+	Adapter_descriptor() error
+	Device_types() (*voltha.DeviceTypes, error)
+	Health() (*voltha.HealthStatus, error)
+	Adopt_device(device *voltha.Device) error
+	Reconcile_device(device *voltha.Device) error
+	Abandon_device(device *voltha.Device) error
+	Disable_device(device *voltha.Device) error
+	Reenable_device(device *voltha.Device) error
+	Reboot_device(device *voltha.Device) error
+	Self_test_device(device *voltha.Device) error
+	Delete_device(device *voltha.Device) error
+	Get_device_details(device *voltha.Device) error
+	Update_flows_bulk(device *voltha.Device, flows *voltha.Flows, groups *voltha.FlowGroups, flowMetadata *voltha.FlowMetadata) error
+	Update_flows_incrementally(device *voltha.Device, flows *openflow_13.FlowChanges, groups *openflow_13.FlowGroupChanges, flowMetadata *voltha.FlowMetadata) error
+	Update_pm_config(device *voltha.Device, pm_configs *voltha.PmConfigs) error
+	Receive_packet_out(deviceId string, egress_port_no int, msg *openflow_13.OfpPacketOut) error
+	Suppress_event(filter *voltha.EventFilter) error
+	Unsuppress_event(filter *voltha.EventFilter) error
+	Get_ofp_device_info(device *voltha.Device) (*ic.SwitchCapability, error)
+	Process_inter_adapter_message(msg *ic.InterAdapterMessage) error
+	Download_image(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Get_image_download_status(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Cancel_image_download(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Activate_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Revert_image_update(device *voltha.Device, request *voltha.ImageDownload) (*voltha.ImageDownload, error)
+	Enable_port(deviceId string, port *voltha.Port) error
+	Disable_port(deviceId string, port *voltha.Port) error
+	Child_device_lost(parentDeviceId string, parentPortNo uint32, onuID uint32) error
+	Start_omci_test(device *voltha.Device, request *voltha.OmciTestRequest) (*voltha.TestResponse, error)
+	Get_ext_value(deviceId string, device *voltha.Device, valueflag voltha.ValueType_Type) (*voltha.ReturnValues, error)
 }
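Note (illustrative only): with context.Context removed from IAdapter, adapter implementations drop ctx from every method. The hypothetical skeleton below shows just two methods of the interface; a real adapter must implement all of them.

package skeleton

import (
	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
	"github.com/opencord/voltha-protos/v3/go/voltha"
)

// SkeletonAdapter is a partial, hypothetical illustration of the reverted
// method signatures; it does not satisfy the full adapters.IAdapter interface.
type SkeletonAdapter struct{}

func (a *SkeletonAdapter) Adopt_device(device *voltha.Device) error {
	// device provisioning logic would go here
	return nil
}

func (a *SkeletonAdapter) Process_inter_adapter_message(msg *ic.InterAdapterMessage) error {
	// inter-adapter message handling would go here
	return nil
}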
diff --git a/pkg/config/common.go b/pkg/config/common.go
index 06b8b3c..37e05fd 100644
--- a/pkg/config/common.go
+++ b/pkg/config/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.CLogger
+var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "config"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "config"})
 	if err != nil {
 		panic(err)
 	}
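Note (illustrative only): this is the per-package logger registration pattern restored across the library by this revert: log.AddPackage returning a log.Logger instead of log.RegisterPackage returning a log.CLogger. A minimal sketch for a hypothetical package named mypackage:

package mypackage

import "github.com/opencord/voltha-lib-go/v3/pkg/log"

var logger log.Logger

func init() {
	// Register this package with the logging framework so its level can be
	// changed at run time.
	var err error
	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mypackage"})
	if err != nil {
		panic(err)
	}
}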
diff --git a/pkg/config/configmanager.go b/pkg/config/configmanager.go
index 11aa8e6..24988be 100644
--- a/pkg/config/configmanager.go
+++ b/pkg/config/configmanager.go
@@ -96,14 +96,14 @@
 	kvStoreEventChan chan *kvstore.Event
 }
 
-func NewConfigManager(ctx context.Context, kvClient kvstore.Client, kvStoreType, kvStoreAddress string, kvStoreTimeout time.Duration) *ConfigManager {
+func NewConfigManager(kvClient kvstore.Client, kvStoreType, kvStoreAddress string, kvStoreTimeout time.Duration) *ConfigManager {
 	var kvStorePrefix string
 	if prefix, present := os.LookupEnv("KV_STORE_DATAPATH_PREFIX"); present {
 		kvStorePrefix = prefix
-		logger.Infow(ctx, "KV_STORE_DATAPATH_PREFIX env variable is set, ", log.Fields{"kvStoreDataPathPrefix": kvStorePrefix})
+		logger.Infow("KV_STORE_DATAPATH_PREFIX env variable is set, ", log.Fields{"kvStoreDataPathPrefix": kvStorePrefix})
 	} else {
 		kvStorePrefix = defaultkvStoreDataPathPrefix
-		logger.Infow(ctx, "KV_STORE_DATAPATH_PREFIX env variable is not set, using default", log.Fields{"kvStoreDataPathPrefix": defaultkvStoreDataPathPrefix})
+		logger.Infow("KV_STORE_DATAPATH_PREFIX env variable is not set, using default", log.Fields{"kvStoreDataPathPrefix": defaultkvStoreDataPathPrefix})
 	}
 	return &ConfigManager{
 		KVStoreConfigPrefix:   defaultkvStoreConfigPath,
@@ -176,31 +176,31 @@
 func (c *ComponentConfig) MonitorForConfigChange(ctx context.Context) chan *ConfigChangeEvent {
 	key := c.makeConfigPath()
 
-	logger.Debugw(ctx, "monitoring-for-config-change", log.Fields{"key": key})
+	logger.Debugw("monitoring-for-config-change", log.Fields{"key": key})
 
 	c.changeEventChan = make(chan *ConfigChangeEvent, 1)
 
 	c.kvStoreEventChan = c.cManager.Backend.CreateWatch(ctx, key, true)
 
-	go c.processKVStoreWatchEvents(ctx)
+	go c.processKVStoreWatchEvents()
 
 	return c.changeEventChan
 }
 
 // processKVStoreWatchEvents process event channel recieved from the Backend for any ChangeType
 // It checks for the EventType is valid or not.For the valid EventTypes creates ConfigChangeEvent and send it on channel
-func (c *ComponentConfig) processKVStoreWatchEvents(ctx context.Context) {
+func (c *ComponentConfig) processKVStoreWatchEvents() {
 
 	ccKeyPrefix := c.makeConfigPath()
 
-	logger.Debugw(ctx, "processing-kvstore-event-change", log.Fields{"key-prefix": ccKeyPrefix})
+	logger.Debugw("processing-kvstore-event-change", log.Fields{"key-prefix": ccKeyPrefix})
 
 	ccPathPrefix := c.cManager.Backend.PathPrefix + ccKeyPrefix + kvStorePathSeparator
 
 	for watchResp := range c.kvStoreEventChan {
 
 		if watchResp.EventType == kvstore.CONNECTIONDOWN || watchResp.EventType == kvstore.UNKNOWN {
-			logger.Warnw(ctx, "received-invalid-change-type-in-watch-channel-from-kvstore", log.Fields{"change-type": watchResp.EventType})
+			logger.Warnw("received-invalid-change-type-in-watch-channel-from-kvstore", log.Fields{"change-type": watchResp.EventType})
 			continue
 		}
 
@@ -220,7 +220,7 @@
 func (c *ComponentConfig) Retrieve(ctx context.Context, configKey string) (string, error) {
 	key := c.makeConfigPath() + "/" + configKey
 
-	logger.Debugw(ctx, "retrieving-config", log.Fields{"key": key})
+	logger.Debugw("retrieving-config", log.Fields{"key": key})
 
 	if kvpair, err := c.cManager.Backend.Get(ctx, key); err != nil {
 		return "", err
@@ -230,7 +230,7 @@
 		}
 
 		value := strings.Trim(fmt.Sprintf("%s", kvpair.Value), "\"")
-		logger.Debugw(ctx, "retrieved-config", log.Fields{"key": key, "value": value})
+		logger.Debugw("retrieved-config", log.Fields{"key": key, "value": value})
 		return value, nil
 	}
 }
@@ -238,7 +238,7 @@
 func (c *ComponentConfig) RetrieveAll(ctx context.Context) (map[string]string, error) {
 	key := c.makeConfigPath()
 
-	logger.Debugw(ctx, "retreiving-list", log.Fields{"key": key})
+	logger.Debugw("retreiving-list", log.Fields{"key": key})
 
 	data, err := c.cManager.Backend.List(ctx, key)
 	if err != nil {
@@ -261,7 +261,7 @@
 func (c *ComponentConfig) Save(ctx context.Context, configKey string, configValue string) error {
 	key := c.makeConfigPath() + "/" + configKey
 
-	logger.Debugw(ctx, "saving-config", log.Fields{"key": key, "value": configValue})
+	logger.Debugw("saving-config", log.Fields{"key": key, "value": configValue})
 
 	//save the data for update config
 	if err := c.cManager.Backend.Put(ctx, key, configValue); err != nil {
@@ -274,7 +274,7 @@
 	//construct key using makeConfigPath
 	key := c.makeConfigPath() + "/" + configKey
 
-	logger.Debugw(ctx, "deleting-config", log.Fields{"key": key})
+	logger.Debugw("deleting-config", log.Fields{"key": key})
 	//delete the config
 	if err := c.cManager.Backend.Delete(ctx, key); err != nil {
 		return err
diff --git a/pkg/config/logcontroller.go b/pkg/config/logcontroller.go
index f83e383..b00569f 100644
--- a/pkg/config/logcontroller.go
+++ b/pkg/config/logcontroller.go
@@ -51,8 +51,9 @@
 	initialLogLevel     string // Initial default log level set by helm chart
 }
 
-func NewComponentLogController(ctx context.Context, cm *ConfigManager) (*ComponentLogController, error) {
-	logger.Debug(ctx, "creating-new-component-log-controller")
+func NewComponentLogController(cm *ConfigManager) (*ComponentLogController, error) {
+
+	logger.Debug("creating-new-component-log-controller")
 	componentName := os.Getenv("COMPONENT_NAME")
 	if componentName == "" {
 		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
@@ -79,17 +80,17 @@
 // Then, it persists initial default Loglevels into Config Store before
 // starting the loading and processing of all Log Configuration
 func StartLogLevelConfigProcessing(cm *ConfigManager, ctx context.Context) {
-	cc, err := NewComponentLogController(ctx, cm)
+	cc, err := NewComponentLogController(cm)
 	if err != nil {
-		logger.Errorw(ctx, "unable-to-construct-component-log-controller-instance-for-log-config-monitoring", log.Fields{"error": err})
+		logger.Errorw("unable-to-construct-component-log-controller-instance-for-log-config-monitoring", log.Fields{"error": err})
 		return
 	}
 
 	cc.GlobalConfig = cm.InitComponentConfig(globalConfigRootNode, ConfigTypeLogLevel)
-	logger.Debugw(ctx, "global-log-config", log.Fields{"cc-global-config": cc.GlobalConfig})
+	logger.Debugw("global-log-config", log.Fields{"cc-global-config": cc.GlobalConfig})
 
 	cc.componentNameConfig = cm.InitComponentConfig(cc.ComponentName, ConfigTypeLogLevel)
-	logger.Debugw(ctx, "component-log-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
+	logger.Debugw("component-log-config", log.Fields{"cc-component-name-config": cc.componentNameConfig})
 
 	cc.persistInitialDefaultLogConfigs(ctx)
 
@@ -104,21 +105,21 @@
 
 	_, err := c.GlobalConfig.Retrieve(ctx, defaultLogLevelKey)
 	if err != nil {
-		logger.Debugw(ctx, "failed-to-retrieve-global-default-log-config-at-startup", log.Fields{"error": err})
+		logger.Debugw("failed-to-retrieve-global-default-log-config-at-startup", log.Fields{"error": err})
 
 		err = c.GlobalConfig.Save(ctx, defaultLogLevelKey, initialGlobalDefaultLogLevelValue)
 		if err != nil {
-			logger.Errorw(ctx, "failed-to-persist-global-default-log-config-at-startup", log.Fields{"error": err, "loglevel": initialGlobalDefaultLogLevelValue})
+			logger.Errorw("failed-to-persist-global-default-log-config-at-startup", log.Fields{"error": err, "loglevel": initialGlobalDefaultLogLevelValue})
 		}
 	}
 
 	_, err = c.componentNameConfig.Retrieve(ctx, defaultLogLevelKey)
 	if err != nil {
-		logger.Debugw(ctx, "failed-to-retrieve-component-default-log-config-at-startup", log.Fields{"error": err})
+		logger.Debugw("failed-to-retrieve-component-default-log-config-at-startup", log.Fields{"error": err})
 
 		err = c.componentNameConfig.Save(ctx, defaultLogLevelKey, c.initialLogLevel)
 		if err != nil {
-			logger.Errorw(ctx, "failed-to-persist-component-default-log-config-at-startup", log.Fields{"error": err, "loglevel": c.initialLogLevel})
+			logger.Errorw("failed-to-persist-component-default-log-config-at-startup", log.Fields{"error": err, "loglevel": c.initialLogLevel})
 		}
 	}
 }
@@ -128,7 +129,7 @@
 func (c *ComponentLogController) persistRegisteredLogPackageList(ctx context.Context) {
 
 	componentMetadataConfig := c.configManager.InitComponentConfig(c.ComponentName, ConfigTypeMetadata)
-	logger.Debugw(ctx, "component-metadata-config", log.Fields{"component-metadata-config": componentMetadataConfig})
+	logger.Debugw("component-metadata-config", log.Fields{"component-metadata-config": componentMetadataConfig})
 
 	packageList := log.GetPackageNames()
 	packageList = append(packageList, defaultLogLevelKey)
@@ -136,12 +137,12 @@
 
 	packageNames, err := json.Marshal(packageList)
 	if err != nil {
-		logger.Errorw(ctx, "failed-to-marshal-log-package-list-for-storage", log.Fields{"error": err, "packageList": packageList})
+		logger.Errorw("failed-to-marshal-log-package-list-for-storage", log.Fields{"error": err, "packageList": packageList})
 		return
 	}
 
 	if err := componentMetadataConfig.Save(ctx, logPackagesListKey, string(packageNames)); err != nil {
-		logger.Errorw(ctx, "failed-to-persist-component-registered-log-package-list-at-startup", log.Fields{"error": err, "packageNames": packageNames})
+		logger.Errorw("failed-to-persist-component-registered-log-package-list-at-startup", log.Fields{"error": err, "packageNames": packageNames})
 	}
 }
 
@@ -154,10 +155,10 @@
 	// Load and apply Log Config for first time
 	initialLogConfig, err := c.buildUpdatedLogConfig(ctx)
 	if err != nil {
-		logger.Warnw(ctx, "unable-to-load-log-config-at-startup", log.Fields{"error": err})
+		logger.Warnw("unable-to-load-log-config-at-startup", log.Fields{"error": err})
 	} else {
-		if err := c.loadAndApplyLogConfig(ctx, initialLogConfig); err != nil {
-			logger.Warnw(ctx, "unable-to-apply-log-config-at-startup", log.Fields{"error": err})
+		if err := c.loadAndApplyLogConfig(initialLogConfig); err != nil {
+			logger.Warnw("unable-to-apply-log-config-at-startup", log.Fields{"error": err})
 		}
 	}
 
@@ -173,25 +174,25 @@
 		case configEvent = <-componentConfigEventChan:
 
 		}
-		logger.Debugw(ctx, "processing-log-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
+		logger.Debugw("processing-log-config-change", log.Fields{"ChangeType": configEvent.ChangeType, "Package": configEvent.ConfigAttribute})
 
 		updatedLogConfig, err := c.buildUpdatedLogConfig(ctx)
 		if err != nil {
-			logger.Warnw(ctx, "unable-to-fetch-updated-log-config", log.Fields{"error": err})
+			logger.Warnw("unable-to-fetch-updated-log-config", log.Fields{"error": err})
 			continue
 		}
 
-		logger.Debugw(ctx, "applying-updated-log-config", log.Fields{"updated-log-config": updatedLogConfig})
+		logger.Debugw("applying-updated-log-config", log.Fields{"updated-log-config": updatedLogConfig})
 
-		if err := c.loadAndApplyLogConfig(ctx, updatedLogConfig); err != nil {
-			logger.Warnw(ctx, "unable-to-load-and-apply-log-config", log.Fields{"error": err})
+		if err := c.loadAndApplyLogConfig(updatedLogConfig); err != nil {
+			logger.Warnw("unable-to-load-and-apply-log-config", log.Fields{"error": err})
 		}
 	}
 
 }
 
 // get active loglevel from the zap logger
-func getActiveLogLevels(ctx context.Context) map[string]string {
+func getActiveLogLevels() map[string]string {
 	loglevels := make(map[string]string)
 
 	// now do the default log level
@@ -203,7 +204,7 @@
 	for _, packageName := range log.GetPackageNames() {
 		level, err := log.GetPackageLogLevel(packageName)
 		if err != nil {
-			logger.Warnw(ctx, "unable-to-fetch-current-active-loglevel-for-package-name", log.Fields{"package-name": packageName, "error": err})
+			logger.Warnw("unable-to-fetch-current-active-loglevel-for-package-name", log.Fields{"package-name": packageName, "error": err})
 			continue
 		}
 
@@ -212,7 +213,7 @@
 		}
 	}
 
-	logger.Debugw(ctx, "retreived-log-levels-from-zap-logger", log.Fields{"loglevels": loglevels})
+	logger.Debugw("retreived-log-levels-from-zap-logger", log.Fields{"loglevels": loglevels})
 
 	return loglevels
 }
@@ -227,16 +228,16 @@
 	// Handle edge cases when global default loglevel is deleted directly from etcd or set to a invalid value
 	// We should use hard-coded initial default value in such cases
 	if globalDefaultLogLevel == "" {
-		logger.Warn(ctx, "global-default-loglevel-not-found-in-config-store")
+		logger.Warn("global-default-loglevel-not-found-in-config-store")
 		globalDefaultLogLevel = initialGlobalDefaultLogLevelValue
 	}
 
 	if _, err := log.StringToLogLevel(globalDefaultLogLevel); err != nil {
-		logger.Warnw(ctx, "unsupported-loglevel-config-defined-at-global-default", log.Fields{"log-level": globalDefaultLogLevel})
+		logger.Warnw("unsupported-loglevel-config-defined-at-global-default", log.Fields{"log-level": globalDefaultLogLevel})
 		globalDefaultLogLevel = initialGlobalDefaultLogLevelValue
 	}
 
-	logger.Debugw(ctx, "retrieved-global-default-loglevel", log.Fields{"level": globalDefaultLogLevel})
+	logger.Debugw("retrieved-global-default-loglevel", log.Fields{"level": globalDefaultLogLevel})
 
 	return globalDefaultLogLevel, nil
 }
@@ -250,7 +251,7 @@
 	effectiveDefaultLogLevel := ""
 	for logConfigKey, logConfigValue := range componentLogConfig {
 		if _, err := log.StringToLogLevel(logConfigValue); err != nil || logConfigKey == "" {
-			logger.Warnw(ctx, "unsupported-loglevel-config-defined-at-component-context", log.Fields{"package-name": logConfigKey, "log-level": logConfigValue})
+			logger.Warnw("unsupported-loglevel-config-defined-at-component-context", log.Fields{"package-name": logConfigKey, "log-level": logConfigValue})
 			delete(componentLogConfig, logConfigKey)
 		} else {
 			if logConfigKey == defaultLogLevelKey {
@@ -267,7 +268,7 @@
 
 	componentLogConfig[defaultLogLevelKey] = effectiveDefaultLogLevel
 
-	logger.Debugw(ctx, "retrieved-component-log-config", log.Fields{"component-log-level": componentLogConfig})
+	logger.Debugw("retrieved-component-log-config", log.Fields{"component-log-level": componentLogConfig})
 
 	return componentLogConfig, nil
 }
@@ -281,7 +282,7 @@
 func (c *ComponentLogController) buildUpdatedLogConfig(ctx context.Context) (map[string]string, error) {
 	globalLogLevel, err := c.getGlobalLogConfig(ctx)
 	if err != nil {
-		logger.Errorw(ctx, "unable-to-retrieve-global-log-config", log.Fields{"err": err})
+		logger.Errorw("unable-to-retrieve-global-log-config", log.Fields{"err": err})
 	}
 
 	componentLogConfig, err := c.getComponentLogConfig(ctx, globalLogLevel)
@@ -301,17 +302,17 @@
 // create hash of loaded configuration using GenerateLogConfigHash
 // if there is previous hash stored, compare the hash to stored hash
 // if there is any change will call UpdateLogLevels
-func (c *ComponentLogController) loadAndApplyLogConfig(ctx context.Context, logConfig map[string]string) error {
+func (c *ComponentLogController) loadAndApplyLogConfig(logConfig map[string]string) error {
 	currentLogHash, err := GenerateLogConfigHash(logConfig)
 	if err != nil {
 		return err
 	}
 
 	if c.logHash != currentLogHash {
-		updateLogLevels(ctx, logConfig)
+		UpdateLogLevels(logConfig)
 		c.logHash = currentLogHash
 	} else {
-		logger.Debug(ctx, "effective-loglevel-config-same-as-currently-active")
+		logger.Debug("effective-loglevel-config-same-as-currently-active")
 	}
 
 	return nil
@@ -321,7 +322,7 @@
 // to identify and create map of modified Log Levels of 2 types:
 // - Packages for which log level has been changed
 // - Packages for which log level config has been cleared - set to default log level
-func createModifiedLogLevels(ctx context.Context, activeLogLevels, updatedLogLevels map[string]string) map[string]string {
+func createModifiedLogLevels(activeLogLevels, updatedLogLevels map[string]string) map[string]string {
 	defaultLevel := updatedLogLevels[defaultLogLevelKey]
 
 	modifiedLogLevels := make(map[string]string)
@@ -338,7 +339,7 @@
 	// Log warnings for all invalid packages for which log config has been set
 	for key, value := range updatedLogLevels {
 		if _, exist := activeLogLevels[key]; !exist {
-			logger.Warnw(ctx, "ignoring-loglevel-set-for-invalid-package", log.Fields{"package": key, "log-level": value})
+			logger.Warnw("ignoring-loglevel-set-for-invalid-package", log.Fields{"package": key, "log-level": value})
 		}
 	}
 
@@ -348,18 +349,18 @@
 // updateLogLevels update the loglevels for the component
 // retrieve active confguration from logger
 // compare with entries one by one and apply
-func updateLogLevels(ctx context.Context, updatedLogConfig map[string]string) {
+func UpdateLogLevels(updatedLogConfig map[string]string) {
 
-	activeLogLevels := getActiveLogLevels(ctx)
-	changedLogLevels := createModifiedLogLevels(ctx, activeLogLevels, updatedLogConfig)
+	activeLogLevels := getActiveLogLevels()
+	changedLogLevels := createModifiedLogLevels(activeLogLevels, updatedLogConfig)
 
 	// If no changed log levels are found, just return. It may happen on configuration of a invalid package
 	if len(changedLogLevels) == 0 {
-		logger.Debug(ctx, "no-change-in-effective-loglevel-config")
+		logger.Debug("no-change-in-effective-loglevel-config")
 		return
 	}
 
-	logger.Debugw(ctx, "applying-log-level-for-modified-packages", log.Fields{"changed-log-levels": changedLogLevels})
+	logger.Debugw("applying-log-level-for-modified-packages", log.Fields{"changed-log-levels": changedLogLevels})
 	for key, level := range changedLogLevels {
 		if key == defaultLogLevelKey {
 			if l, err := log.StringToLogLevel(level); err == nil {
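Note (illustrative only, values assumed): a sketch of wiring the reverted config APIs together, that is, an etcd kvstore client built without a context (see the kvstore changes further below), a ConfigManager, and the log-level monitoring loop. The store type, address and timeout are example values, and StartLogLevelConfigProcessing still expects COMPONENT_NAME to be set in the environment.

package main

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/config"
	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

func main() {
	// NewEtcdClient and NewConfigManager no longer take a context after the revert.
	kvClient, err := kvstore.NewEtcdClient("localhost:2379", 5*time.Second, log.WarnLevel)
	if err != nil {
		panic(err)
	}
	cm := config.NewConfigManager(kvClient, "etcd", "localhost:2379", 5*time.Second)

	// The processing loop itself keeps its per-call context.
	go config.StartLogLevelConfigProcessing(cm, context.Background())

	// ... the component's own work would continue here.
}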
diff --git a/pkg/db/backend.go b/pkg/db/backend.go
index f595dc1..1e23a0f 100644
--- a/pkg/db/backend.go
+++ b/pkg/db/backend.go
@@ -47,7 +47,7 @@
 }
 
 // NewBackend creates a new instance of a Backend structure
-func NewBackend(ctx context.Context, storeType string, address string, timeout time.Duration, pathPrefix string) *Backend {
+func NewBackend(storeType string, address string, timeout time.Duration, pathPrefix string) *Backend {
 	var err error
 
 	b := &Backend{
@@ -59,8 +59,8 @@
 		alive:                   false, // connection considered down at start
 	}
 
-	if b.Client, err = b.newClient(ctx, address, timeout); err != nil {
-		logger.Errorw(ctx, "failed-to-create-kv-client",
+	if b.Client, err = b.newClient(address, timeout); err != nil {
+		logger.Errorw("failed-to-create-kv-client",
 			log.Fields{
 				"type": storeType, "address": address,
 				"timeout": timeout, "prefix": pathPrefix,
@@ -71,22 +71,22 @@
 	return b
 }
 
-func (b *Backend) newClient(ctx context.Context, address string, timeout time.Duration) (kvstore.Client, error) {
+func (b *Backend) newClient(address string, timeout time.Duration) (kvstore.Client, error) {
 	switch b.StoreType {
 	case "consul":
-		return kvstore.NewConsulClient(ctx, address, timeout)
+		return kvstore.NewConsulClient(address, timeout)
 	case "etcd":
-		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
+		return kvstore.NewEtcdClient(address, timeout, log.WarnLevel)
 	}
 	return nil, errors.New("unsupported-kv-store")
 }
 
-func (b *Backend) makePath(ctx context.Context, key string) string {
+func (b *Backend) makePath(key string) string {
 	path := fmt.Sprintf("%s/%s", b.PathPrefix, key)
 	return path
 }
 
-func (b *Backend) updateLiveness(ctx context.Context, alive bool) {
+func (b *Backend) updateLiveness(alive bool) {
 	// Periodically push stream of liveness data to the channel,
 	// so that in a live state, the core does not timeout and
 	// send a forced liveness message. Push alive state if the
@@ -94,11 +94,11 @@
 	if b.liveness != nil {
 
 		if b.alive != alive {
-			logger.Debug(ctx, "update-liveness-channel-reason-change")
+			logger.Debug("update-liveness-channel-reason-change")
 			b.liveness <- alive
 			b.lastLivenessTime = time.Now()
 		} else if time.Since(b.lastLivenessTime) > b.LivenessChannelInterval {
-			logger.Debug(ctx, "update-liveness-channel-reason-interval")
+			logger.Debug("update-liveness-channel-reason-interval")
 			b.liveness <- alive
 			b.lastLivenessTime = time.Now()
 		}
@@ -106,7 +106,7 @@
 
 	// Emit log message only for alive state change
 	if b.alive != alive {
-		logger.Debugw(ctx, "change-kvstore-alive-status", log.Fields{"alive": alive})
+		logger.Debugw("change-kvstore-alive-status", log.Fields{"alive": alive})
 		b.alive = alive
 	}
 }
@@ -115,9 +115,9 @@
 // post on Liveness channel
 func (b *Backend) PerformLivenessCheck(ctx context.Context) bool {
 	alive := b.Client.IsConnectionUp(ctx)
-	logger.Debugw(ctx, "kvstore-liveness-check-result", log.Fields{"alive": alive})
+	logger.Debugw("kvstore-liveness-check-result", log.Fields{"alive": alive})
 
-	b.updateLiveness(ctx, alive)
+	b.updateLiveness(alive)
 	return alive
 }
 
@@ -126,11 +126,11 @@
 // or not the connection is still Live. This channel is then picked up
 // by the service (i.e. rw_core / ro_core) to update readiness status
 // and/or take other actions.
-func (b *Backend) EnableLivenessChannel(ctx context.Context) chan bool {
-	logger.Debug(ctx, "enable-kvstore-liveness-channel")
+func (b *Backend) EnableLivenessChannel() chan bool {
+	logger.Debug("enable-kvstore-liveness-channel")
 
 	if b.liveness == nil {
-		logger.Debug(ctx, "create-kvstore-liveness-channel")
+		logger.Debug("create-kvstore-liveness-channel")
 
 		// Channel size of 10 to avoid any possibility of blocking in Load conditions
 		b.liveness = make(chan bool, 10)
@@ -144,7 +144,7 @@
 }
 
 // Extract Alive status of Kvstore based on type of error
-func (b *Backend) isErrorIndicatingAliveKvstore(ctx context.Context, err error) bool {
+func (b *Backend) isErrorIndicatingAliveKvstore(err error) bool {
 	// Alive unless observed an error indicating so
 	alive := true
 
@@ -182,64 +182,64 @@
 
 // List retrieves one or more items that match the specified key
 func (b *Backend) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
-	formattedPath := b.makePath(ctx, key)
-	logger.Debugw(ctx, "listing-key", log.Fields{"key": key, "path": formattedPath})
+	formattedPath := b.makePath(key)
+	logger.Debugw("listing-key", log.Fields{"key": key, "path": formattedPath})
 
 	pair, err := b.Client.List(ctx, formattedPath)
 
-	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
 
 	return pair, err
 }
 
 // Get retrieves an item that matches the specified key
 func (b *Backend) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
-	formattedPath := b.makePath(ctx, key)
-	logger.Debugw(ctx, "getting-key", log.Fields{"key": key, "path": formattedPath})
+	formattedPath := b.makePath(key)
+	logger.Debugw("getting-key", log.Fields{"key": key, "path": formattedPath})
 
 	pair, err := b.Client.Get(ctx, formattedPath)
 
-	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
 
 	return pair, err
 }
 
 // Put stores an item value under the specifed key
 func (b *Backend) Put(ctx context.Context, key string, value interface{}) error {
-	formattedPath := b.makePath(ctx, key)
-	logger.Debugw(ctx, "putting-key", log.Fields{"key": key, "path": formattedPath})
+	formattedPath := b.makePath(key)
+	logger.Debugw("putting-key", log.Fields{"key": key, "path": formattedPath})
 
 	err := b.Client.Put(ctx, formattedPath, value)
 
-	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
 
 	return err
 }
 
 // Delete removes an item under the specified key
 func (b *Backend) Delete(ctx context.Context, key string) error {
-	formattedPath := b.makePath(ctx, key)
-	logger.Debugw(ctx, "deleting-key", log.Fields{"key": key, "path": formattedPath})
+	formattedPath := b.makePath(key)
+	logger.Debugw("deleting-key", log.Fields{"key": key, "path": formattedPath})
 
 	err := b.Client.Delete(ctx, formattedPath)
 
-	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+	b.updateLiveness(b.isErrorIndicatingAliveKvstore(err))
 
 	return err
 }
 
 // CreateWatch starts watching events for the specified key
 func (b *Backend) CreateWatch(ctx context.Context, key string, withPrefix bool) chan *kvstore.Event {
-	formattedPath := b.makePath(ctx, key)
-	logger.Debugw(ctx, "creating-key-watch", log.Fields{"key": key, "path": formattedPath})
+	formattedPath := b.makePath(key)
+	logger.Debugw("creating-key-watch", log.Fields{"key": key, "path": formattedPath})
 
 	return b.Client.Watch(ctx, formattedPath, withPrefix)
 }
 
 // DeleteWatch stops watching events for the specified key
-func (b *Backend) DeleteWatch(ctx context.Context, key string, ch chan *kvstore.Event) {
-	formattedPath := b.makePath(ctx, key)
-	logger.Debugw(ctx, "deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
+func (b *Backend) DeleteWatch(key string, ch chan *kvstore.Event) {
+	formattedPath := b.makePath(key)
+	logger.Debugw("deleting-key-watch", log.Fields{"key": key, "path": formattedPath})
 
-	b.Client.CloseWatch(ctx, formattedPath, ch)
+	b.Client.CloseWatch(formattedPath, ch)
 }
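Note (illustrative only): after the revert, constructing a db.Backend takes no context while per-request operations still do. A minimal sketch with example address, path prefix and key; the value passed to Put is assumed to be raw bytes.

package main

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/db"
)

func main() {
	// The leading context argument is gone from NewBackend.
	backend := db.NewBackend("etcd", "localhost:2379", 5*time.Second, "service/voltha")

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Data-path calls keep their per-request context.
	if err := backend.Put(ctx, "devices/device-1", []byte("some-value")); err != nil {
		panic(err)
	}
	if kvp, err := backend.Get(ctx, "devices/device-1"); err == nil && kvp != nil {
		_ = kvp.Value
	}
}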
diff --git a/pkg/db/backend_test.go b/pkg/db/backend_test.go
index 98b72a0..b8c66bd 100644
--- a/pkg/db/backend_test.go
+++ b/pkg/db/backend_test.go
@@ -41,37 +41,35 @@
 )
 
 func TestMain(m *testing.M) {
-	ctx := context.Background()
 	var err error
 	embedEtcdServerPort, err = freeport.GetFreePort()
 	if err != nil {
-		logger.Fatal(ctx, err)
+		logger.Fatal(err)
 	}
 	dummyEtcdServerPort, err = freeport.GetFreePort()
 	if err != nil {
-		logger.Fatal(ctx, err)
+		logger.Fatal(err)
 	}
 	peerPort, err := freeport.GetFreePort()
 	if err != nil {
-		logger.Fatal(ctx, err)
+		logger.Fatal(err)
 	}
-	etcdServer := mocks.StartEtcdServer(ctx, mocks.MKConfig(ctx, "voltha.db.test", embedEtcdServerPort, peerPort, "voltha.lib.db", "error"))
+	etcdServer := mocks.StartEtcdServer(mocks.MKConfig("voltha.db.test", embedEtcdServerPort, peerPort, "voltha.lib.db", "error"))
 	res := m.Run()
 
-	etcdServer.Stop(ctx)
+	etcdServer.Stop()
 	os.Exit(res)
 }
 
 func provisionBackendWithEmbeddedEtcdServer(t *testing.T) *Backend {
-	ctx := context.Background()
-	backend := NewBackend(ctx, "etcd", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
+	backend := NewBackend("etcd", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
 	assert.NotNil(t, backend)
 	assert.NotNil(t, backend.Client)
 	return backend
 }
 
 func provisionBackendWithDummyEtcdServer(t *testing.T) *Backend {
-	backend := NewBackend(context.Background(), "etcd", embedEtcdServerHost+":"+strconv.Itoa(dummyEtcdServerPort), defaultTimeout, defaultPathPrefix)
+	backend := NewBackend("etcd", embedEtcdServerHost+":"+strconv.Itoa(dummyEtcdServerPort), defaultTimeout, defaultPathPrefix)
 	assert.NotNil(t, backend)
 	assert.NotNil(t, backend.Client)
 	return backend
@@ -80,7 +78,7 @@
 // Create instance using Etcd Kvstore
 func TestNewBackend_EtcdKvStore(t *testing.T) {
 	address := embedEtcdServerHost + ":" + strconv.Itoa(embedEtcdServerPort)
-	backend := NewBackend(context.Background(), "etcd", address, defaultTimeout, defaultPathPrefix)
+	backend := NewBackend("etcd", address, defaultTimeout, defaultPathPrefix)
 
 	// Verify all attributes of backend have got set correctly
 	assert.NotNil(t, backend)
@@ -96,7 +94,7 @@
 
 // Create instance using Consul Kvstore
 func TestNewBackend_ConsulKvStore(t *testing.T) {
-	backend := NewBackend(context.Background(), "consul", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
+	backend := NewBackend("consul", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
 
 	// Verify kvstore type attribute of backend has got set correctly
 	assert.NotNil(t, backend)
@@ -106,7 +104,7 @@
 
 // Create instance using Invalid Kvstore; instance creation should fail
 func TestNewBackend_InvalidKvstore(t *testing.T) {
-	backend := NewBackend(context.Background(), "unknown", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
+	backend := NewBackend("unknown", embedEtcdServerHost+":"+strconv.Itoa(embedEtcdServerPort), defaultTimeout, defaultPathPrefix)
 
 	assert.NotNil(t, backend)
 	assert.Nil(t, backend.Client)
@@ -114,7 +112,7 @@
 
 func TestMakePath(t *testing.T) {
 	backend := provisionBackendWithEmbeddedEtcdServer(t)
-	path := backend.makePath(context.Background(), "Suffix")
+	path := backend.makePath("Suffix")
 	assert.Equal(t, defaultPathPrefix+"/Suffix", path)
 }
 
@@ -140,7 +138,7 @@
 func TestEnableLivenessChannel_EmbeddedEtcdServer_BeforeLivenessCheck(t *testing.T) {
 	backend := provisionBackendWithEmbeddedEtcdServer(t)
 
-	alive := backend.EnableLivenessChannel(context.Background())
+	alive := backend.EnableLivenessChannel()
 	assert.NotNil(t, alive)
 	assert.Equal(t, 1, len(alive))
 	assert.Equal(t, false, <-alive)
@@ -154,7 +152,7 @@
 	defer cancel()
 	backend.PerformLivenessCheck(ctx)
 
-	alive := backend.EnableLivenessChannel(ctx)
+	alive := backend.EnableLivenessChannel()
 	assert.NotNil(t, alive)
 	assert.Equal(t, 1, len(alive))
 	assert.Equal(t, true, <-alive)
@@ -165,14 +163,14 @@
 func TestUpdateLiveness_AliveStatusChange(t *testing.T) {
 	backend := provisionBackendWithEmbeddedEtcdServer(t)
 	// Enable Liveness Channel and verify initial state is not-alive
-	aliveState := backend.EnableLivenessChannel(context.Background())
+	aliveState := backend.EnableLivenessChannel()
 	assert.NotNil(t, aliveState)
 	assert.Equal(t, 1, len(backend.liveness))
 	assert.Equal(t, false, <-backend.liveness)
 	lastUpdateTime := backend.lastLivenessTime
 
 	// Update with changed alive state. Verify alive state push & liveness time update
-	backend.updateLiveness(context.Background(), true)
+	backend.updateLiveness(true)
 	assert.Equal(t, 1, len(backend.liveness))
 	assert.Equal(t, true, <-backend.liveness)
 	assert.NotEqual(t, lastUpdateTime, backend.lastLivenessTime)
@@ -182,13 +180,13 @@
 func TestUpdateLiveness_AliveStatusUnchanged(t *testing.T) {
 	backend := provisionBackendWithEmbeddedEtcdServer(t)
 	// Enable Liveness Channel and verify initial state is not-alive
-	aliveState := backend.EnableLivenessChannel(context.Background())
+	aliveState := backend.EnableLivenessChannel()
 	assert.NotNil(t, aliveState)
 	assert.Equal(t, false, <-backend.liveness)
 	lastUpdateTime := backend.lastLivenessTime
 
 	// Update with same alive state. Verify no further alive state push
-	backend.updateLiveness(context.Background(), false)
+	backend.updateLiveness(false)
 	assert.Equal(t, 0, len(backend.liveness))
 	assert.Equal(t, lastUpdateTime, backend.lastLivenessTime)
 
@@ -197,7 +195,7 @@
 	backend.lastLivenessTime = time.Now().Add(-tenMinDuration)
 	lastUpdateTime = backend.lastLivenessTime
 
-	backend.updateLiveness(context.Background(), false)
+	backend.updateLiveness(false)
 	assert.Equal(t, 1, len(backend.liveness))
 	assert.Equal(t, false, <-backend.liveness)
 	assert.NotEqual(t, lastUpdateTime, backend.lastLivenessTime)
@@ -225,7 +223,7 @@
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			if backend.isErrorIndicatingAliveKvstore(context.Background(), tt.arg) != tt.want {
+			if backend.isErrorIndicatingAliveKvstore(tt.arg) != tt.want {
 				t.Errorf("isErrorIndicatingAliveKvstore failed for %s: expected %t but got %t", tt.name, tt.want, !tt.want)
 			}
 		})
@@ -413,7 +411,7 @@
 	time.Sleep(time.Millisecond * 100)
 	assert.Equal(t, 1, len(eventChan))
 
-	backend.DeleteWatch(context.Background(), "key5", eventChan)
+	backend.DeleteWatch("key5", eventChan)
 }
 
 // Test Create and Delete Watch with prefix for Embedded Etcd Server
@@ -434,5 +432,5 @@
 	time.Sleep(time.Millisecond * 100)
 	assert.Equal(t, 1, len(eventChan))
 
-	backend.DeleteWatch(context.Background(), "key6", eventChan)
+	backend.DeleteWatch("key6", eventChan)
 }
diff --git a/pkg/db/common.go b/pkg/db/common.go
index fe84b46..1cf2e1c 100644
--- a/pkg/db/common.go
+++ b/pkg/db/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.CLogger
+var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "db"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "db"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/db/kvstore/client.go b/pkg/db/kvstore/client.go
index 480d476..158e626 100644
--- a/pkg/db/kvstore/client.go
+++ b/pkg/db/kvstore/client.go
@@ -88,6 +88,6 @@
 	AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error
 	ReleaseLock(lockName string) error
 	IsConnectionUp(ctx context.Context) bool // timeout in second
-	CloseWatch(ctx context.Context, key string, ch chan *Event)
-	Close(ctx context.Context)
+	CloseWatch(key string, ch chan *Event)
+	Close()
 }
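Note (illustrative only): at the kvstore.Client level the same split applies: Watch keeps its context, while CloseWatch and Close lose theirs. A small sketch against an assumed local etcd endpoint and example key.

package main

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

func main() {
	client, err := kvstore.NewEtcdClient("localhost:2379", 5*time.Second, log.WarnLevel)
	if err != nil {
		panic(err)
	}

	// Watch still takes a context; tear-down no longer does.
	events := client.Watch(context.Background(), "service/voltha/devices", true)
	client.CloseWatch("service/voltha/devices", events)
	client.Close()
}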
diff --git a/pkg/db/kvstore/common.go b/pkg/db/kvstore/common.go
index 0de395f..aa7aeb0 100644
--- a/pkg/db/kvstore/common.go
+++ b/pkg/db/kvstore/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.CLogger
+var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kvstore"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kvstore"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/db/kvstore/consulclient.go b/pkg/db/kvstore/consulclient.go
index c2cd841..d2544dd 100644
--- a/pkg/db/kvstore/consulclient.go
+++ b/pkg/db/kvstore/consulclient.go
@@ -44,13 +44,14 @@
 }
 
 // NewConsulClient returns a new client for the Consul KV store
-func NewConsulClient(ctx context.Context, addr string, timeout time.Duration) (*ConsulClient, error) {
+func NewConsulClient(addr string, timeout time.Duration) (*ConsulClient, error) {
+
 	config := consulapi.DefaultConfig()
 	config.Address = addr
 	config.WaitTime = timeout
 	consul, err := consulapi.NewClient(config)
 	if err != nil {
-		logger.Error(ctx, err)
+		logger.Error(err)
 		return nil, err
 	}
 
@@ -62,7 +63,7 @@
 
 // IsConnectionUp returns whether the connection to the Consul KV store is up
 func (c *ConsulClient) IsConnectionUp(ctx context.Context) bool {
-	logger.Error(ctx, "Unimplemented function")
+	logger.Error("Unimplemented function")
 	return false
 }
 
@@ -79,7 +80,7 @@
 	// For now we ignore meta data
 	kvps, _, err := kv.List(key, &queryOptions)
 	if err != nil {
-		logger.Error(ctx, err)
+		logger.Error(err)
 		return nil, err
 	}
 	m := make(map[string]*KVPair)
@@ -102,7 +103,7 @@
 	// For now we ignore meta data
 	kvp, _, err := kv.Get(key, &queryOptions)
 	if err != nil {
-		logger.Error(ctx, err)
+		logger.Error(err)
 		return nil, err
 	}
 	if kvp != nil {
@@ -121,7 +122,7 @@
 	var val []byte
 	var er error
 	if val, er = ToByte(value); er != nil {
-		logger.Error(ctx, er)
+		logger.Error(er)
 		return er
 	}
 
@@ -133,7 +134,7 @@
 	defer c.writeLock.Unlock()
 	_, err := kv.Put(&kvp, &writeOptions)
 	if err != nil {
-		logger.Error(ctx, err)
+		logger.Error(err)
 		return err
 	}
 	return nil
@@ -148,26 +149,26 @@
 	defer c.writeLock.Unlock()
 	_, err := kv.Delete(key, &writeOptions)
 	if err != nil {
-		logger.Error(ctx, err)
+		logger.Error(err)
 		return err
 	}
 	return nil
 }
 
-func (c *ConsulClient) deleteSession(ctx context.Context) {
+func (c *ConsulClient) deleteSession() {
 	if c.sessionID != "" {
-		logger.Debug(ctx, "cleaning-up-session")
+		logger.Debug("cleaning-up-session")
 		session := c.consul.Session()
 		_, err := session.Destroy(c.sessionID, nil)
 		if err != nil {
-			logger.Errorw(ctx, "error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
+			logger.Errorw("error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
 		}
 	}
 	c.sessionID = ""
 	c.session = nil
 }
 
-func (c *ConsulClient) createSession(ctx context.Context, ttl time.Duration, retries int) (*consulapi.Session, string, error) {
+func (c *ConsulClient) createSession(ttl time.Duration, retries int) (*consulapi.Session, string, error) {
 	session := c.consul.Session()
 	entry := &consulapi.SessionEntry{
 		Behavior: consulapi.SessionBehaviorDelete,
@@ -177,17 +178,17 @@
 	for {
 		id, meta, err := session.Create(entry, nil)
 		if err != nil {
-			logger.Errorw(ctx, "create-session-error", log.Fields{"error": err})
+			logger.Errorw("create-session-error", log.Fields{"error": err})
 			if retries == 0 {
 				return nil, "", err
 			}
 		} else if meta.RequestTime == 0 {
-			logger.Errorw(ctx, "create-session-bad-meta-data", log.Fields{"meta-data": meta})
+			logger.Errorw("create-session-bad-meta-data", log.Fields{"meta-data": meta})
 			if retries == 0 {
 				return nil, "", errors.New("bad-meta-data")
 			}
 		} else if id == "" {
-			logger.Error(ctx, "create-session-nil-id")
+			logger.Error("create-session-nil-id")
 			if retries == 0 {
 				return nil, "", errors.New("ID-nil")
 			}
@@ -198,7 +199,7 @@
 		if retries > 0 {
 			retries--
 		}
-		logger.Debug(ctx, "retrying-session-create-after-a-second-delay")
+		logger.Debug("retrying-session-create-after-a-second-delay")
 		time.Sleep(time.Duration(1) * time.Second)
 	}
 }
@@ -225,30 +226,30 @@
 	var val []byte
 	var er error
 	if val, er = ToByte(value); er != nil {
-		logger.Error(ctx, er)
+		logger.Error(er)
 		return nil, er
 	}
 
 	// Cleanup any existing session and recreate new ones.  A key is reserved against a session
 	if c.sessionID != "" {
-		c.deleteSession(ctx)
+		c.deleteSession()
 	}
 
 	// Clear session if reservation is not successful
 	reservationSuccessful := false
 	defer func() {
 		if !reservationSuccessful {
-			logger.Debug(ctx, "deleting-session")
-			c.deleteSession(ctx)
+			logger.Debug("deleting-session")
+			c.deleteSession()
 		}
 	}()
 
-	session, sessionID, err := c.createSession(ctx, ttl, -1)
+	session, sessionID, err := c.createSession(ttl, -1)
 	if err != nil {
-		logger.Errorw(ctx, "no-session-created", log.Fields{"error": err})
+		logger.Errorw("no-session-created", log.Fields{"error": err})
 		return "", errors.New("no-session-created")
 	}
-	logger.Debugw(ctx, "session-created", log.Fields{"session-id": sessionID})
+	logger.Debugw("session-created", log.Fields{"session-id": sessionID})
 	c.sessionID = sessionID
 	c.session = session
 
@@ -257,11 +258,11 @@
 	kvp := consulapi.KVPair{Key: key, Value: val, Session: c.sessionID}
 	result, _, err := kv.Acquire(&kvp, nil)
 	if err != nil {
-		logger.Errorw(ctx, "error-acquiring-keys", log.Fields{"error": err})
+		logger.Errorw("error-acquiring-keys", log.Fields{"error": err})
 		return nil, err
 	}
 
-	logger.Debugw(ctx, "key-acquired", log.Fields{"key": key, "status": result})
+	logger.Debugw("key-acquired", log.Fields{"key": key, "status": result})
 
 	// Irrespective whether we were successful in acquiring the key, let's read it back and see if it's us.
 	m, err := c.Get(ctx, key)
@@ -269,7 +270,7 @@
 		return nil, err
 	}
 	if m != nil {
-		logger.Debugw(ctx, "response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
+		logger.Debugw("response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
 		if m.Key == key && isEqual(m.Value, value) {
 			// My reservation is successful - register it.  For now, support is only for 1 reservation per key
 			// per session.
@@ -299,11 +300,11 @@
 		kvp = consulapi.KVPair{Key: key, Value: value.([]byte), Session: c.sessionID}
 		result, _, err = kv.Release(&kvp, nil)
 		if err != nil {
-			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key, "error": err})
+			logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
 			return err
 		}
 		if !result {
-			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key})
+			logger.Errorw("cannot-release-reservation", log.Fields{"key": key})
 		}
 		delete(c.keyReservations, key)
 	}
@@ -383,21 +384,21 @@
 
 // CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
 // may be multiple listeners on the same key.  The previously created channel serves as a key
-func (c *ConsulClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
+func (c *ConsulClient) CloseWatch(key string, ch chan *Event) {
 	// First close the context
 	var ok bool
 	var watchedChannelsContexts []*channelContextMap
 	c.writeLock.Lock()
 	defer c.writeLock.Unlock()
 	if watchedChannelsContexts, ok = c.watchedChannelsContext[key]; !ok {
-		logger.Errorw(ctx, "key-has-no-watched-context-or-channel", log.Fields{"key": key})
+		logger.Errorw("key-has-no-watched-context-or-channel", log.Fields{"key": key})
 		return
 	}
 	// Look for the channels
 	var pos = -1
 	for i, chCtxMap := range watchedChannelsContexts {
 		if chCtxMap.channel == ch {
-			logger.Debug(ctx, "channel-found")
+			logger.Debug("channel-found")
 			chCtxMap.cancel()
 			//close the channel
 			close(ch)
@@ -409,7 +410,7 @@
 	if pos >= 0 {
 		c.watchedChannelsContext[key] = append(c.watchedChannelsContext[key][:pos], c.watchedChannelsContext[key][pos+1:]...)
 	}
-	logger.Debugw(ctx, "watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
+	logger.Debugw("watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
 }
 
 func (c *ConsulClient) isKVEqual(kv1 *consulapi.KVPair, kv2 *consulapi.KVPair) bool {
@@ -429,10 +430,10 @@
 	return true
 }
 
-func (c *ConsulClient) listenForKeyChange(ctx context.Context, key string, ch chan *Event) {
-	logger.Debugw(ctx, "start-watching-channel", log.Fields{"key": key, "channel": ch})
+func (c *ConsulClient) listenForKeyChange(watchContext context.Context, key string, ch chan *Event) {
+	logger.Debugw("start-watching-channel", log.Fields{"key": key, "channel": ch})
 
-	defer c.CloseWatch(ctx, key, ch)
+	defer c.CloseWatch(key, ch)
 	kv := c.consul.KV()
 	var queryOptions consulapi.QueryOptions
 	queryOptions.WaitTime = defaultKVGetTimeout
@@ -440,7 +441,7 @@
 	// Get the existing value, if any
 	previousKVPair, meta, err := kv.Get(key, &queryOptions)
 	if err != nil {
-		logger.Debug(ctx, err)
+		logger.Debug(err)
 	}
 	lastIndex := meta.LastIndex
 
@@ -448,37 +449,37 @@
 	//var waitOptions consulapi.QueryOptions
 	var pair *consulapi.KVPair
 	//watchContext, _ := context.WithCancel(context.Background())
-	waitOptions := queryOptions.WithContext(ctx)
+	waitOptions := queryOptions.WithContext(watchContext)
 	for {
 		//waitOptions = consulapi.QueryOptions{WaitIndex: lastIndex}
 		waitOptions.WaitIndex = lastIndex
 		pair, meta, err = kv.Get(key, waitOptions)
 		select {
-		case <-ctx.Done():
-			logger.Debug(ctx, "done-event-received-exiting")
+		case <-watchContext.Done():
+			logger.Debug("done-event-received-exiting")
 			return
 		default:
 			if err != nil {
-				logger.Warnw(ctx, "error-from-watch", log.Fields{"error": err})
+				logger.Warnw("error-from-watch", log.Fields{"error": err})
 				ch <- NewEvent(CONNECTIONDOWN, key, []byte(""), -1)
 			} else {
-				logger.Debugw(ctx, "index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
+				logger.Debugw("index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
 			}
 		}
 		if err != nil {
-			logger.Debug(ctx, err)
+			logger.Debug(err)
 			// On error, block for 10 milliseconds to prevent endless loop
 			time.Sleep(10 * time.Millisecond)
 		} else if meta.LastIndex <= lastIndex {
-			logger.Info(ctx, "no-index-change-or-negative")
+			logger.Info("no-index-change-or-negative")
 		} else {
-			logger.Debugw(ctx, "update-received", log.Fields{"pair": pair})
+			logger.Debugw("update-received", log.Fields{"pair": pair})
 			if pair == nil {
 				ch <- NewEvent(DELETE, key, []byte(""), -1)
 			} else if !c.isKVEqual(pair, previousKVPair) {
 				// Push the change onto the channel if the data has changed
 				// For now just assume it's a PUT change
-				logger.Debugw(ctx, "pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
+				logger.Debugw("pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
 				ch <- NewEvent(PUT, pair.Key, pair.Value, -1)
 			}
 			previousKVPair = pair
@@ -488,7 +489,7 @@
 }
 
 // Close closes the KV store client
-func (c *ConsulClient) Close(ctx context.Context) {
+func (c *ConsulClient) Close() {
 	var writeOptions consulapi.WriteOptions
 	// Inform any goroutine it's time to say goodbye.
 	c.writeLock.Lock()
@@ -499,7 +500,7 @@
 
 	// Clear the sessionID
 	if _, err := c.consul.Session().Destroy(c.sessionID, &writeOptions); err != nil {
-		logger.Errorw(ctx, "error-closing-client", log.Fields{"error": err})
+		logger.Errorw("error-closing-client", log.Fields{"error": err})
 	}
 }
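
For reference, a minimal sketch of a caller against the reverted ConsulClient surface: the constructor and Close() drop the context argument, while per-request calls such as Get keep theirs. The Consul address and key below are placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
)

func main() {
	// After the revert the constructor is context-free again.
	client, err := kvstore.NewConsulClient("127.0.0.1:8500", 5*time.Second)
	if err != nil {
		panic(err)
	}
	// Per-request operations still take their own context.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if kvp, err := client.Get(ctx, "service/voltha/example-key"); err == nil && kvp != nil {
		fmt.Println("value:", string(kvp.Value.([]byte)))
	}
	// Close() also goes back to taking no arguments.
	client.Close()
}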
 
diff --git a/pkg/db/kvstore/etcdclient.go b/pkg/db/kvstore/etcdclient.go
index 0165e18..8d4a462 100644
--- a/pkg/db/kvstore/etcdclient.go
+++ b/pkg/db/kvstore/etcdclient.go
@@ -40,7 +40,7 @@
 }
 
 // NewEtcdClient returns a new client for the Etcd KV store
-func NewEtcdClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+func NewEtcdClient(addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
 	logconfig := log.ConstructZapConfig(log.JSON, level, log.Fields{})
 
 	c, err := v3Client.New(v3Client.Config{
@@ -49,7 +49,7 @@
 		LogConfig:   &logconfig,
 	})
 	if err != nil {
-		logger.Error(ctx, err)
+		logger.Error(err)
 		return nil, err
 	}
 
@@ -77,7 +77,7 @@
 func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
 	resp, err := c.ectdAPI.Get(ctx, key, v3Client.WithPrefix())
 	if err != nil {
-		logger.Error(ctx, err)
+		logger.Error(err)
 		return nil, err
 	}
 	m := make(map[string]*KVPair)
@@ -94,7 +94,7 @@
 	resp, err := c.ectdAPI.Get(ctx, key)
 
 	if err != nil {
-		logger.Error(ctx, err)
+		logger.Error(err)
 		return nil, err
 	}
 	for _, ev := range resp.Kvs {
@@ -131,13 +131,13 @@
 	if err != nil {
 		switch err {
 		case context.Canceled:
-			logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			logger.Warnw("context-cancelled", log.Fields{"error": err})
 		case context.DeadlineExceeded:
-			logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err})
+			logger.Warnw("context-deadline-exceeded", log.Fields{"error": err})
 		case v3rpcTypes.ErrEmptyKey:
-			logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			logger.Warnw("etcd-client-error", log.Fields{"error": err})
 		default:
-			logger.Warnw(ctx, "bad-endpoints", log.Fields{"error": err})
+			logger.Warnw("bad-endpoints", log.Fields{"error": err})
 		}
 		return err
 	}
@@ -150,10 +150,10 @@
 
 	// delete the key
 	if _, err := c.ectdAPI.Delete(ctx, key); err != nil {
-		logger.Errorw(ctx, "failed-to-delete-key", log.Fields{"key": key, "error": err})
+		logger.Errorw("failed-to-delete-key", log.Fields{"key": key, "error": err})
 		return err
 	}
-	logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
+	logger.Debugw("key(s)-deleted", log.Fields{"key": key})
 	return nil
 }
 
@@ -172,7 +172,7 @@
 
 	resp, err := c.ectdAPI.Grant(ctx, int64(ttl.Seconds()))
 	if err != nil {
-		logger.Error(ctx, err)
+		logger.Error(err)
 		return nil, err
 	}
 	// Register the lease id
@@ -185,7 +185,7 @@
 	defer func() {
 		if !reservationSuccessful {
 			if err = c.ReleaseReservation(context.Background(), key); err != nil {
-				logger.Error(ctx, "cannot-release-lease")
+				logger.Error("cannot-release-lease")
 			}
 		}
 	}()
@@ -240,7 +240,7 @@
 	for key, leaseID := range c.keyReservations {
 		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
 		if err != nil {
-			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key, "error": err})
+			logger.Errorw("cannot-release-reservation", log.Fields{"key": key, "error": err})
 			return err
 		}
 		delete(c.keyReservations, key)
@@ -251,7 +251,7 @@
 // ReleaseReservation releases reservation for a specific key.
 func (c *EtcdClient) ReleaseReservation(ctx context.Context, key string) error {
 	// Get the leaseid using the key
-	logger.Debugw(ctx, "Release-reservation", log.Fields{"key": key})
+	logger.Debugw("Release-reservation", log.Fields{"key": key})
 	var ok bool
 	var leaseID *v3Client.LeaseID
 	c.keyReservationsLock.Lock()
@@ -263,7 +263,7 @@
 	if leaseID != nil {
 		_, err := c.ectdAPI.Revoke(ctx, *leaseID)
 		if err != nil {
-			logger.Error(ctx, err)
+			logger.Error(err)
 			return err
 		}
 		delete(c.keyReservations, key)
@@ -288,7 +288,7 @@
 	if leaseID != nil {
 		_, err := c.ectdAPI.KeepAliveOnce(ctx, *leaseID)
 		if err != nil {
-			logger.Errorw(ctx, "lease-may-have-expired", log.Fields{"error": err})
+			logger.Errorw("lease-may-have-expired", log.Fields{"error": err})
 			return err
 		}
 	} else {
@@ -320,9 +320,9 @@
 
 	// Changing the log field (from channelMaps) as the underlying logger cannot format the map of channels into a
 	// json format.
-	logger.Debugw(ctx, "watched-channels", log.Fields{"len": len(channelMaps)})
+	logger.Debugw("watched-channels", log.Fields{"len": len(channelMaps)})
 	// Launch a go routine to listen for updates
-	go c.listenForKeyChange(ctx, channel, ch, cancel)
+	go c.listenForKeyChange(channel, ch, cancel)
 
 	return ch
 
@@ -369,23 +369,23 @@
 
 // CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
 // may be multiple listeners on the same key.  The previously created channel serves as a key
-func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
+func (c *EtcdClient) CloseWatch(key string, ch chan *Event) {
 	// Get the array of channels mapping
 	var watchedChannels []map[chan *Event]v3Client.Watcher
 	var ok bool
 
 	if watchedChannels, ok = c.getChannelMaps(key); !ok {
-		logger.Warnw(ctx, "key-has-no-watched-channels", log.Fields{"key": key})
+		logger.Warnw("key-has-no-watched-channels", log.Fields{"key": key})
 		return
 	}
 	// Look for the channels
 	var pos = -1
 	for i, chMap := range watchedChannels {
 		if t, ok := chMap[ch]; ok {
-			logger.Debug(ctx, "channel-found")
+			logger.Debug("channel-found")
 			// Close the etcd watcher before the client channel.  This should close the etcd channel as well
 			if err := t.Close(); err != nil {
-				logger.Errorw(ctx, "watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+				logger.Errorw("watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
 			}
 			pos = i
 			break
@@ -397,11 +397,11 @@
 	if pos >= 0 {
 		channelMaps = c.removeChannelMap(key, pos)
 	}
-	logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+	logger.Infow("watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
 }
 
-func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
-	logger.Debug(ctx, "start-listening-on-channel ...")
+func (c *EtcdClient) listenForKeyChange(channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+	logger.Debug("start-listening-on-channel ...")
 	defer cancel()
 	defer close(ch)
 	for resp := range channel {
@@ -409,7 +409,7 @@
 			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
 		}
 	}
-	logger.Debug(ctx, "stop-listening-on-channel ...")
+	logger.Debug("stop-listening-on-channel ...")
 }
 
 func getEventType(event *v3Client.Event) int {
@@ -423,9 +423,9 @@
 }
 
 // Close closes the KV store client
-func (c *EtcdClient) Close(ctx context.Context) {
+func (c *EtcdClient) Close() {
 	if err := c.ectdAPI.Close(); err != nil {
-		logger.Errorw(ctx, "error-closing-client", log.Fields{"error": err})
+		logger.Errorw("error-closing-client", log.Fields{"error": err})
 	}
 }
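
Likewise, a minimal sketch against the reverted EtcdClient surface, assuming an etcd endpoint at 127.0.0.1:2379; the constructor and Close() are context-free again while List/Get/Delete keep a per-call context.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/opencord/voltha-lib-go/v3/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v3/pkg/log"
)

func main() {
	// Constructor without a context after the revert; the log level is still passed in.
	client, err := kvstore.NewEtcdClient("127.0.0.1:2379", 5*time.Second, log.ErrorLevel)
	if err != nil {
		panic(err)
	}
	defer client.Close() // Close() takes no context either

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	pairs, err := client.List(ctx, "service/voltha") // per-call context is unchanged
	if err != nil {
		panic(err)
	}
	for key := range pairs {
		fmt.Println("found key:", key)
	}
}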
 
diff --git a/pkg/flows/common.go b/pkg/flows/common.go
index 0328d72..557de3f 100644
--- a/pkg/flows/common.go
+++ b/pkg/flows/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.CLogger
+var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "flowsUtils"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "flowsUtils"})
 	if err != nil {
 		panic(err)
 	}
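
The same context-free registration pattern applies to any package that wants its log level adjustable at run time; a sketch for a hypothetical consumer package (the package name is illustrative):

package mypkg

import "github.com/opencord/voltha-lib-go/v3/pkg/log"

var logger log.Logger

func init() {
	// With the revert, packages hold a plain log.Logger and register through
	// AddPackage; no context is threaded through the logging calls.
	var err error
	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mypkg"})
	if err != nil {
		panic(err)
	}
}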
diff --git a/pkg/flows/flow_utils.go b/pkg/flows/flow_utils.go
index 66e719c..3139144 100644
--- a/pkg/flows/flow_utils.go
+++ b/pkg/flows/flow_utils.go
@@ -17,7 +17,6 @@
 
 import (
 	"bytes"
-	"context"
 	"crypto/md5"
 	"encoding/binary"
 	"fmt"
@@ -504,7 +503,7 @@
 }
 
 //GetMetaData - legacy get method (only want lower 32 bits)
-func GetMetaData(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+func GetMetaData(flow *ofp.OfpFlowStats) uint32 {
 	if flow == nil {
 		return 0
 	}
@@ -513,11 +512,11 @@
 			return uint32(field.GetTableMetadata() & 0xFFFFFFFF)
 		}
 	}
-	logger.Debug(ctx, "No-metadata-present")
+	logger.Debug("No-metadata-present")
 	return 0
 }
 
-func GetMetaData64Bit(ctx context.Context, flow *ofp.OfpFlowStats) uint64 {
+func GetMetaData64Bit(flow *ofp.OfpFlowStats) uint64 {
 	if flow == nil {
 		return 0
 	}
@@ -526,12 +525,12 @@
 			return field.GetTableMetadata()
 		}
 	}
-	logger.Debug(ctx, "No-metadata-present")
+	logger.Debug("No-metadata-present")
 	return 0
 }
 
 // function returns write metadata value from write_metadata action field
-func GetMetadataFromWriteMetadataAction(ctx context.Context, flow *ofp.OfpFlowStats) uint64 {
+func GetMetadataFromWriteMetadataAction(flow *ofp.OfpFlowStats) uint64 {
 	if flow != nil {
 		for _, instruction := range flow.Instructions {
 			if instruction.Type == uint32(WRITE_METADATA) {
@@ -541,11 +540,11 @@
 			}
 		}
 	}
-	logger.Debugw(ctx, "No-write-metadata-present", log.Fields{"flow": flow})
+	logger.Debugw("No-write-metadata-present", log.Fields{"flow": flow})
 	return 0
 }
 
-func GetTechProfileIDFromWriteMetaData(ctx context.Context, metadata uint64) uint16 {
+func GetTechProfileIDFromWriteMetaData(metadata uint64) uint16 {
 	/*
 	   Write metadata instruction value (metadata) is 8 bytes:
 	   MS 2 bytes: C Tag
@@ -555,15 +554,15 @@
 	   This is set in the ONOS OltPipeline as a write metadata instruction
 	*/
 	var tpId uint16 = 0
-	logger.Debugw(ctx, "Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
+	logger.Debugw("Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
 	if metadata != 0 {
 		tpId = uint16((metadata >> 32) & 0xFFFF)
-		logger.Debugw(ctx, "Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
+		logger.Debugw("Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
 	}
 	return tpId
 }
 
-func GetEgressPortNumberFromWriteMetadata(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+func GetEgressPortNumberFromWriteMetadata(flow *ofp.OfpFlowStats) uint32 {
 	/*
 			  Write metadata instruction value (metadata) is 8 bytes:
 		    	MS 2 bytes: C Tag
@@ -572,17 +571,17 @@
 		    	This is set in the ONOS OltPipeline as a write metadata instruction
 	*/
 	var uniPort uint32 = 0
-	md := GetMetadataFromWriteMetadataAction(ctx, flow)
-	logger.Debugw(ctx, "Metadata found for egress/uni port ", log.Fields{"metadata": md})
+	md := GetMetadataFromWriteMetadataAction(flow)
+	logger.Debugw("Metadata found for egress/uni port ", log.Fields{"metadata": md})
 	if md != 0 {
 		uniPort = uint32(md & 0xFFFFFFFF)
-		logger.Debugw(ctx, "Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
+		logger.Debugw("Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
 	}
 	return uniPort
 
 }
 
-func GetInnerTagFromMetaData(ctx context.Context, flow *ofp.OfpFlowStats) uint16 {
+func GetInnerTagFromMetaData(flow *ofp.OfpFlowStats) uint16 {
 	/*
 			  Write metadata instruction value (metadata) is 8 bytes:
 		    	MS 2 bytes: C Tag
@@ -591,10 +590,10 @@
 		    	This is set in the ONOS OltPipeline as a write metadata instruction
 	*/
 	var innerTag uint16 = 0
-	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	md := GetMetadataFromWriteMetadataAction(flow)
 	if md != 0 {
 		innerTag = uint16((md >> 48) & 0xFFFF)
-		logger.Debugw(ctx, "Found  CVLAN from write metadate action", log.Fields{"c_vlan": innerTag})
+		logger.Debugw("Found  CVLAN from write metadate action", log.Fields{"c_vlan": innerTag})
 	}
 	return innerTag
 }
@@ -608,7 +607,7 @@
 		return 0
 	}
 	if md <= 0xffffffff {
-		logger.Debugw(ctx, "onos-upgrade-suggested", logger.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
+		logger.Debugw("onos-upgrade-suggested", logger.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
 		return md
 	}
 	return (md >> 32) & 0xffffffff
@@ -938,12 +937,12 @@
 }
 
 // flowStatsEntryFromFlowModMessage maps an ofp_flow_mod message to an ofp_flow_stats message
-func MeterEntryFromMeterMod(ctx context.Context, meterMod *ofp.OfpMeterMod) *ofp.OfpMeterEntry {
+func MeterEntryFromMeterMod(meterMod *ofp.OfpMeterMod) *ofp.OfpMeterEntry {
 	bandStats := make([]*ofp.OfpMeterBandStats, 0)
 	meter := &ofp.OfpMeterEntry{Config: &ofp.OfpMeterConfig{},
 		Stats: &ofp.OfpMeterStats{BandStats: bandStats}}
 	if meterMod == nil {
-		logger.Error(ctx, "Invalid meter mod command")
+		logger.Error("Invalid meter mod command")
 		return meter
 	}
 	// config init
@@ -965,7 +964,7 @@
 		bandStats = append(bandStats, band)
 	}
 	meter.Stats.BandStats = bandStats
-	logger.Debugw(ctx, "Allocated meter entry", log.Fields{"meter": *meter})
+	logger.Debugw("Allocated meter entry", log.Fields{"meter": *meter})
 	return meter
 
 }
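
The write-metadata helpers above decode an 8-byte value laid out as C-Tag (MS 2 bytes), technology-profile ID (next 2 bytes) and egress/UNI port (LS 4 bytes); a quick arithmetic sketch of that decoding with an assumed value:

package main

import "fmt"

func main() {
	// Assumed write-metadata value: C-Tag 100, tech-profile ID 64, egress/UNI port 256.
	md := uint64(100)<<48 | uint64(64)<<32 | uint64(256)

	innerTag := uint16((md >> 48) & 0xFFFF) // same shift as GetInnerTagFromMetaData
	tpID := uint16((md >> 32) & 0xFFFF)     // same shift as GetTechProfileIDFromWriteMetaData
	uniPort := uint32(md & 0xFFFFFFFF)      // same mask as GetEgressPortNumberFromWriteMetadata

	fmt.Println(innerTag, tpID, uniPort) // prints: 100 64 256
}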
diff --git a/pkg/grpc/common.go b/pkg/grpc/common.go
index fa53542..17eeeaf 100644
--- a/pkg/grpc/common.go
+++ b/pkg/grpc/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.CLogger
+var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "grpc"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "grpc"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/grpc/server.go b/pkg/grpc/server.go
index 2bf7696..fa5c521 100644
--- a/pkg/grpc/server.go
+++ b/pkg/grpc/server.go
@@ -93,19 +93,19 @@
 
 	lis, err := net.Listen("tcp", s.address)
 	if err != nil {
-		logger.Fatalf(ctx, "failed to listen: %v", err)
+		logger.Fatalf("failed to listen: %v", err)
 	}
 
 	if s.secure && s.GrpcSecurity != nil {
 		creds, err := credentials.NewServerTLSFromFile(s.CertFile, s.KeyFile)
 		if err != nil {
-			logger.Fatalf(ctx, "could not load TLS keys: %s", err)
+			logger.Fatalf("could not load TLS keys: %s", err)
 		}
 		s.gs = grpc.NewServer(grpc.Creds(creds),
 			withServerUnaryInterceptor(s))
 
 	} else {
-		logger.Info(ctx, "starting-insecure-grpc-server")
+		logger.Info("starting-insecure-grpc-server")
 		s.gs = grpc.NewServer(withServerUnaryInterceptor(s))
 	}
 
@@ -115,7 +115,7 @@
 	}
 
 	if err := s.gs.Serve(lis); err != nil {
-		logger.Fatalf(ctx, "failed to serve: %v\n", err)
+		logger.Fatalf("failed to serve: %v\n", err)
 	}
 }
 
@@ -138,7 +138,7 @@
 		handler grpc.UnaryHandler) (interface{}, error) {
 
 		if (s.probe != nil) && (!s.probe.IsReady()) {
-			logger.Warnf(ctx, "Grpc request received while not ready %v", req)
+			logger.Warnf("Grpc request received while not ready %v", req)
 			return nil, status.Error(codes.Unavailable, "system is not ready")
 		}
 
diff --git a/pkg/kafka/client.go b/pkg/kafka/client.go
index d977e38..0d9e3a5 100755
--- a/pkg/kafka/client.go
+++ b/pkg/kafka/client.go
@@ -16,9 +16,9 @@
 package kafka
 
 import (
-	"context"
-	ca "github.com/opencord/voltha-protos/v3/go/inter_container"
 	"time"
+
+	ca "github.com/opencord/voltha-protos/v3/go/inter_container"
 )
 
 const (
@@ -61,15 +61,15 @@
 
 // MsgClient represents the set of APIs  a Kafka MsgClient must implement
 type Client interface {
-	Start(ctx context.Context) error
-	Stop(ctx context.Context)
-	CreateTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error
-	DeleteTopic(ctx context.Context, topic *Topic) error
-	Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan *ca.InterContainerMessage, error)
-	UnSubscribe(ctx context.Context, topic *Topic, ch <-chan *ca.InterContainerMessage) error
-	SubscribeForMetadata(context.Context, func(fromTopic string, timestamp time.Time))
-	Send(ctx context.Context, msg interface{}, topic *Topic, keys ...string) error
-	SendLiveness(ctx context.Context) error
-	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
-	EnableHealthinessChannel(ctx context.Context, enable bool) chan bool
+	Start() error
+	Stop()
+	CreateTopic(topic *Topic, numPartition int, repFactor int) error
+	DeleteTopic(topic *Topic) error
+	Subscribe(topic *Topic, kvArgs ...*KVArg) (<-chan *ca.InterContainerMessage, error)
+	UnSubscribe(topic *Topic, ch <-chan *ca.InterContainerMessage) error
+	SubscribeForMetadata(func(fromTopic string, timestamp time.Time))
+	Send(msg interface{}, topic *Topic, keys ...string) error
+	SendLiveness() error
+	EnableLivenessChannel(enable bool) chan bool
+	EnableHealthinessChannel(enable bool) chan bool
 }
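
A minimal sketch of a consumer written against the reverted, context-free Client interface; the concrete client (e.g. a sarama-backed implementation) is assumed to be constructed elsewhere and passed in, and the topic name is a placeholder.

package example

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
)

func consumeOne(client kafka.Client, topicName string) error {
	if err := client.Start(); err != nil { // Start() no longer takes a context
		return err
	}
	defer client.Stop()

	topic := kafka.Topic{Name: topicName}
	ch, err := client.Subscribe(&topic)
	if err != nil {
		return err
	}
	defer func() { _ = client.UnSubscribe(&topic, ch) }()

	msg := <-ch // blocks until an InterContainerMessage arrives
	fmt.Println("received message with id", msg.Header.Id)
	return nil
}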
diff --git a/pkg/kafka/common.go b/pkg/kafka/common.go
index 99b4cdf..149c150 100644
--- a/pkg/kafka/common.go
+++ b/pkg/kafka/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.CLogger
+var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kafka"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "kafka"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/kafka/endpoint_manager.go b/pkg/kafka/endpoint_manager.go
index a876c09..1258382 100644
--- a/pkg/kafka/endpoint_manager.go
+++ b/pkg/kafka/endpoint_manager.go
@@ -50,15 +50,15 @@
 
 	// GetEndpoint is called to get the endpoint to communicate with for a specific device and service type.  For
 	// now this will return the topic name
-	GetEndpoint(ctx context.Context, deviceID string, serviceType string) (Endpoint, error)
+	GetEndpoint(deviceID string, serviceType string) (Endpoint, error)
 
 	// IsDeviceOwnedByService is invoked when a specific service (service type + replicaNumber) is restarted and
 	// devices owned by that service need to be reconciled
-	IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error)
+	IsDeviceOwnedByService(deviceID string, serviceType string, replicaNumber int32) (bool, error)
 
 	// GetReplicaAssignment returns the replica number of the service that owns the deviceID.  This is used by the
 	// test only
-	GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (ReplicaID, error)
+	GetReplicaAssignment(deviceID string, serviceType string) (ReplicaID, error)
 }
 
 type service struct {
@@ -119,9 +119,9 @@
 	return newEndpointManager(backend, opts...)
 }
 
-func (ep *endpointManager) GetEndpoint(ctx context.Context, deviceID string, serviceType string) (Endpoint, error) {
-	logger.Debugw(ctx, "getting-endpoint", log.Fields{"device-id": deviceID, "service": serviceType})
-	owner, err := ep.getOwner(ctx, deviceID, serviceType)
+func (ep *endpointManager) GetEndpoint(deviceID string, serviceType string) (Endpoint, error) {
+	logger.Debugw("getting-endpoint", log.Fields{"device-id": deviceID, "service": serviceType})
+	owner, err := ep.getOwner(deviceID, serviceType)
 	if err != nil {
 		return "", err
 	}
@@ -133,13 +133,13 @@
 	if endpoint == "" {
 		return "", status.Errorf(codes.Unavailable, "endpoint-not-set-%s", serviceType)
 	}
-	logger.Debugw(ctx, "returning-endpoint", log.Fields{"device-id": deviceID, "service": serviceType, "endpoint": endpoint})
+	logger.Debugw("returning-endpoint", log.Fields{"device-id": deviceID, "service": serviceType, "endpoint": endpoint})
 	return endpoint, nil
 }
 
-func (ep *endpointManager) IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error) {
-	logger.Debugw(ctx, "device-ownership", log.Fields{"device-id": deviceID, "service": serviceType, "replica-number": replicaNumber})
-	owner, err := ep.getOwner(ctx, deviceID, serviceType)
+func (ep *endpointManager) IsDeviceOwnedByService(deviceID string, serviceType string, replicaNumber int32) (bool, error) {
+	logger.Debugw("device-ownership", log.Fields{"device-id": deviceID, "service": serviceType, "replica-number": replicaNumber})
+	owner, err := ep.getOwner(deviceID, serviceType)
 	if err != nil {
 		return false, nil
 	}
@@ -150,8 +150,8 @@
 	return m.getReplica() == ReplicaID(replicaNumber), nil
 }
 
-func (ep *endpointManager) GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (ReplicaID, error) {
-	owner, err := ep.getOwner(ctx, deviceID, serviceType)
+func (ep *endpointManager) GetReplicaAssignment(deviceID string, serviceType string) (ReplicaID, error) {
+	owner, err := ep.getOwner(deviceID, serviceType)
 	if err != nil {
 		return 0, nil
 	}
@@ -162,8 +162,8 @@
 	return m.getReplica(), nil
 }
 
-func (ep *endpointManager) getOwner(ctx context.Context, deviceID string, serviceType string) (consistent.Member, error) {
-	serv, dType, err := ep.getServiceAndDeviceType(ctx, serviceType)
+func (ep *endpointManager) getOwner(deviceID string, serviceType string) (consistent.Member, error) {
+	serv, dType, err := ep.getServiceAndDeviceType(serviceType)
 	if err != nil {
 		return nil, err
 	}
@@ -171,7 +171,7 @@
 	return serv.consistentRing.LocateKey(key), nil
 }
 
-func (ep *endpointManager) getServiceAndDeviceType(ctx context.Context, serviceType string) (*service, string, error) {
+func (ep *endpointManager) getServiceAndDeviceType(serviceType string) (*service, string, error) {
 	// Check whether service exist
 	ep.servicesLock.RLock()
 	serv, serviceExist := ep.services[serviceType]
@@ -179,7 +179,7 @@
 
 	// Load the service and device types if needed
 	if !serviceExist || serv == nil || int(serv.totalReplicas) != len(serv.consistentRing.GetMembers()) {
-		if err := ep.loadServices(ctx); err != nil {
+		if err := ep.loadServices(); err != nil {
 			return nil, "", err
 		}
 
@@ -214,7 +214,7 @@
 // loadServices loads the services (adapters) and device types in memory. Because of the small size of the data and
 // the data format in the dB being binary protobuf then it is better to load all the data if inconsistency is detected,
 // instead of watching for updates in the dB and acting on it.
-func (ep *endpointManager) loadServices(ctx context.Context) error {
+func (ep *endpointManager) loadServices() error {
 	ep.servicesLock.Lock()
 	defer ep.servicesLock.Unlock()
 	ep.deviceTypeServiceMapLock.Lock()
@@ -276,13 +276,13 @@
 	if logger.V(log.DebugLevel) {
 		for key, val := range ep.services {
 			members := val.consistentRing.GetMembers()
-			logger.Debugw(ctx, "service", log.Fields{"service": key, "expected-replica": val.totalReplicas, "replicas": len(val.consistentRing.GetMembers())})
+			logger.Debugw("service", log.Fields{"service": key, "expected-replica": val.totalReplicas, "replicas": len(val.consistentRing.GetMembers())})
 			for _, m := range members {
 				n := m.(Member)
-				logger.Debugw(ctx, "service-loaded", log.Fields{"serviceId": n.getID(), "serviceType": n.getServiceType(), "replica": n.getReplica(), "endpoint": n.getEndPoint()})
+				logger.Debugw("service-loaded", log.Fields{"serviceId": n.getID(), "serviceType": n.getServiceType(), "replica": n.getReplica(), "endpoint": n.getEndPoint()})
 			}
 		}
-		logger.Debugw(ctx, "device-types-loaded", log.Fields{"device-types": ep.deviceTypeServiceMap})
+		logger.Debugw("device-types-loaded", log.Fields{"device-types": ep.deviceTypeServiceMap})
 	}
 	return nil
 }
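
And a minimal sketch against the reverted EndpointManager interface; the manager (normally built on a db.Backend) is assumed to be supplied by the caller, and the service type string is a placeholder.

package example

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
)

func routeDevice(em kafka.EndpointManager, deviceID string) error {
	// All three lookups are context-free again after the revert.
	endpoint, err := em.GetEndpoint(deviceID, "adapter")
	if err != nil {
		return err
	}
	replica, err := em.GetReplicaAssignment(deviceID, "adapter")
	if err != nil {
		return err
	}
	owned, err := em.IsDeviceOwnedByService(deviceID, "adapter", int32(replica))
	if err != nil {
		return err
	}
	fmt.Printf("device %s -> endpoint %s (replica %d, owned %v)\n", deviceID, endpoint, replica, owned)
	return nil
}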
diff --git a/pkg/kafka/endpoint_manager_test.go b/pkg/kafka/endpoint_manager_test.go
index a42cc2e..3790221 100644
--- a/pkg/kafka/endpoint_manager_test.go
+++ b/pkg/kafka/endpoint_manager_test.go
@@ -42,7 +42,6 @@
 }
 
 func newEPTest(minReplicas, maxReplicas int) *EPTest {
-	ctx := context.Background()
 	test := &EPTest{
 		minReplicas: minReplicas,
 		maxReplicas: maxReplicas,
@@ -50,18 +49,17 @@
 
 	// Create backend
 	if err := test.initBackend(); err != nil {
-		logger.Fatalw(ctx, "setting-backend-failed", log.Fields{"error": err})
+		logger.Fatalw("setting-backend-failed", log.Fields{"error": err})
 	}
 
 	// Populate backend with data
 	if err := test.populateBackend(); err != nil {
-		logger.Fatalw(ctx, "populating-db-failed", log.Fields{"error": err})
+		logger.Fatalw("populating-db-failed", log.Fields{"error": err})
 	}
 	return test
 }
 
 func (ep *EPTest) initBackend() error {
-	ctx := context.Background()
 	configName := "voltha-lib.kafka.ep.test"
 	storageDir := "voltha-lib.kafka.ep.etcd"
 	logLevel := "error"
@@ -75,18 +73,18 @@
 	if err != nil {
 		return err
 	}
-	ep.etcdServer = etcd.StartEtcdServer(ctx, etcd.MKConfig(ctx, configName, kvClientPort, peerPort, storageDir, logLevel))
+	ep.etcdServer = etcd.StartEtcdServer(etcd.MKConfig(configName, kvClientPort, peerPort, storageDir, logLevel))
 	if ep.etcdServer == nil {
 		return status.Error(codes.Internal, "Embedded server failed to start")
 	}
 
-	ep.backend = db.NewBackend(ctx, "etcd", "127.0.0.1"+":"+strconv.Itoa(kvClientPort), timeout, "service/voltha")
+	ep.backend = db.NewBackend("etcd", "127.0.0.1"+":"+strconv.Itoa(kvClientPort), timeout, "service/voltha")
 	return nil
 }
 
 func (ep *EPTest) stopAll() {
 	if ep.etcdServer != nil {
-		ep.etcdServer.Stop(context.Background())
+		ep.etcdServer.Stop()
 	}
 }
 
@@ -187,21 +185,20 @@
 }
 
 func (ep *EPTest) testEndpointManagerAPIs(t *testing.T, tm EndpointManager, serviceType string, deviceType string, replicas int) {
-	ctx := context.Background()
 	// Map of device ids to topic
 	deviceIDs := make(map[string]Endpoint)
 	numDevices := 1000
 	total := make([]int, replicas)
 	for i := 0; i < numDevices; i++ {
 		deviceID := uuid.New().String()
-		endpoint, err := tm.GetEndpoint(ctx, deviceID, serviceType)
+		endpoint, err := tm.GetEndpoint(deviceID, serviceType)
 		if err != nil {
-			logger.Fatalw(ctx, "error-getting-endpoint", log.Fields{"error": err})
+			logger.Fatalw("error-getting-endpoint", log.Fields{"error": err})
 		}
 		deviceIDs[deviceID] = endpoint
-		replicaID, err := tm.GetReplicaAssignment(ctx, deviceID, serviceType)
+		replicaID, err := tm.GetReplicaAssignment(deviceID, serviceType)
 		if err != nil {
-			logger.Fatalw(ctx, "error-getting-endpoint", log.Fields{"error": err})
+			logger.Fatalw("error-getting-endpoint", log.Fields{"error": err})
 		}
 		total[replicaID] += 1
 	}
@@ -213,9 +210,9 @@
 	numIterations := 10
 	for i := 0; i < numIterations; i++ {
 		for deviceID, expectedEndpoint := range deviceIDs {
-			endpointByServiceType, err := tm.GetEndpoint(ctx, deviceID, serviceType)
+			endpointByServiceType, err := tm.GetEndpoint(deviceID, serviceType)
 			if err != nil {
-				logger.Fatalw(ctx, "error-getting-endpoint", log.Fields{"error": err})
+				logger.Fatalw("error-getting-endpoint", log.Fields{"error": err})
 			}
 			assert.Equal(t, expectedEndpoint, endpointByServiceType)
 		}
@@ -223,14 +220,14 @@
 
 	// Verify that a device belong to the correct node
 	for deviceID := range deviceIDs {
-		replicaID, err := tm.GetReplicaAssignment(ctx, deviceID, serviceType)
+		replicaID, err := tm.GetReplicaAssignment(deviceID, serviceType)
 		if err != nil {
-			logger.Fatalw(ctx, "error-getting-topic", log.Fields{"error": err})
+			logger.Fatalw("error-getting-topic", log.Fields{"error": err})
 		}
 		for k := 0; k < replicas; k++ {
-			owned, err := tm.IsDeviceOwnedByService(ctx, deviceID, serviceType, int32(k))
+			owned, err := tm.IsDeviceOwnedByService(deviceID, serviceType, int32(k))
 			if err != nil {
-				logger.Fatalw(ctx, "error-verifying-device-ownership", log.Fields{"error": err})
+				logger.Fatalw("error-verifying-device-ownership", log.Fields{"error": err})
 			}
 			assert.Equal(t, ReplicaID(k) == replicaID, owned)
 		}
diff --git a/pkg/kafka/kafka_inter_container_library.go b/pkg/kafka/kafka_inter_container_library.go
index c14c54b..9f9fbfc 100644
--- a/pkg/kafka/kafka_inter_container_library.go
+++ b/pkg/kafka/kafka_inter_container_library.go
@@ -63,18 +63,18 @@
 }
 
 type InterContainerProxy interface {
-	Start(ctx context.Context) error
-	Stop(ctx context.Context)
+	Start() error
+	Stop()
 	GetDefaultTopic() *Topic
-	DeviceDiscovered(ctx context.Context, deviceId string, deviceType string, parentId string, publisher string) error
+	DeviceDiscovered(deviceId string, deviceType string, parentId string, publisher string) error
 	InvokeRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) (bool, *any.Any)
 	InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic, waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse
-	SubscribeWithRequestHandlerInterface(ctx context.Context, topic Topic, handler interface{}) error
-	SubscribeWithDefaultRequestHandler(ctx context.Context, topic Topic, initialOffset int64) error
-	UnSubscribeFromRequestHandler(ctx context.Context, topic Topic) error
-	DeleteTopic(ctx context.Context, topic Topic) error
-	EnableLivenessChannel(ctx context.Context, enable bool) chan bool
-	SendLiveness(ctx context.Context) error
+	SubscribeWithRequestHandlerInterface(topic Topic, handler interface{}) error
+	SubscribeWithDefaultRequestHandler(topic Topic, initialOffset int64) error
+	UnSubscribeFromRequestHandler(topic Topic) error
+	DeleteTopic(topic Topic) error
+	EnableLivenessChannel(enable bool) chan bool
+	SendLiveness() error
 }
 
 // interContainerProxy represents the messaging proxy
@@ -153,17 +153,17 @@
 	return newInterContainerProxy(opts...)
 }
 
-func (kp *interContainerProxy) Start(ctx context.Context) error {
-	logger.Info(ctx, "Starting-Proxy")
+func (kp *interContainerProxy) Start() error {
+	logger.Info("Starting-Proxy")
 
 	// Kafka MsgClient should already have been created.  If not, output fatal error
 	if kp.kafkaClient == nil {
-		logger.Fatal(ctx, "kafka-client-not-set")
+		logger.Fatal("kafka-client-not-set")
 	}
 
 	// Start the kafka client
-	if err := kp.kafkaClient.Start(ctx); err != nil {
-		logger.Errorw(ctx, "Cannot-create-kafka-proxy", log.Fields{"error": err})
+	if err := kp.kafkaClient.Start(); err != nil {
+		logger.Errorw("Cannot-create-kafka-proxy", log.Fields{"error": err})
 		return err
 	}
 
@@ -179,20 +179,20 @@
 	return nil
 }
 
-func (kp *interContainerProxy) Stop(ctx context.Context) {
-	logger.Info(ctx, "stopping-intercontainer-proxy")
+func (kp *interContainerProxy) Stop() {
+	logger.Info("stopping-intercontainer-proxy")
 	kp.doneOnce.Do(func() { close(kp.doneCh) })
 	// TODO : Perform cleanup
-	kp.kafkaClient.Stop(ctx)
-	err := kp.deleteAllTopicRequestHandlerChannelMap(ctx)
+	kp.kafkaClient.Stop()
+	err := kp.deleteAllTopicRequestHandlerChannelMap()
 	if err != nil {
-		logger.Errorw(ctx, "failed-delete-all-topic-request-handler-channel-map", log.Fields{"error": err})
+		logger.Errorw("failed-delete-all-topic-request-handler-channel-map", log.Fields{"error": err})
 	}
-	err = kp.deleteAllTopicResponseChannelMap(ctx)
+	err = kp.deleteAllTopicResponseChannelMap()
 	if err != nil {
-		logger.Errorw(ctx, "failed-delete-all-topic-response-channel-map", log.Fields{"error": err})
+		logger.Errorw("failed-delete-all-topic-response-channel-map", log.Fields{"error": err})
 	}
-	kp.deleteAllTransactionIdToChannelMap(ctx)
+	kp.deleteAllTransactionIdToChannelMap()
 }
 
 func (kp *interContainerProxy) GetDefaultTopic() *Topic {
@@ -200,11 +200,11 @@
 }
 
 // DeviceDiscovered publish the discovered device onto the kafka messaging bus
-func (kp *interContainerProxy) DeviceDiscovered(ctx context.Context, deviceId string, deviceType string, parentId string, publisher string) error {
-	logger.Debugw(ctx, "sending-device-discovery-msg", log.Fields{"deviceId": deviceId})
+func (kp *interContainerProxy) DeviceDiscovered(deviceId string, deviceType string, parentId string, publisher string) error {
+	logger.Debugw("sending-device-discovery-msg", log.Fields{"deviceId": deviceId})
 	//	Simple validation
 	if deviceId == "" || deviceType == "" {
-		logger.Errorw(ctx, "invalid-parameters", log.Fields{"id": deviceId, "type": deviceType})
+		logger.Errorw("invalid-parameters", log.Fields{"id": deviceId, "type": deviceType})
 		return errors.New("invalid-parameters")
 	}
 	//	Create the device discovery message
@@ -225,7 +225,7 @@
 	var marshalledData *any.Any
 	var err error
 	if marshalledData, err = ptypes.MarshalAny(body); err != nil {
-		logger.Errorw(ctx, "cannot-marshal-request", log.Fields{"error": err})
+		logger.Errorw("cannot-marshal-request", log.Fields{"error": err})
 		return err
 	}
 	msg := &ic.InterContainerMessage{
@@ -234,8 +234,8 @@
 	}
 
 	// Send the message
-	if err := kp.kafkaClient.Send(ctx, msg, kp.deviceDiscoveryTopic); err != nil {
-		logger.Errorw(ctx, "cannot-send-device-discovery-message", log.Fields{"error": err})
+	if err := kp.kafkaClient.Send(msg, kp.deviceDiscoveryTopic); err != nil {
+		logger.Errorw("cannot-send-device-discovery-message", log.Fields{"error": err})
 		return err
 	}
 	return nil
@@ -245,7 +245,7 @@
 func (kp *interContainerProxy) InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *Topic, replyToTopic *Topic,
 	waitForResponse bool, key string, kvArgs ...*KVArg) chan *RpcResponse {
 
-	logger.Debugw(ctx, "InvokeAsyncRPC", log.Fields{"rpc": rpc, "key": key})
+	logger.Debugw("InvokeAsyncRPC", log.Fields{"rpc": rpc, "key": key})
 	//	If a replyToTopic is provided then we use it, otherwise just use the  default toTopic.  The replyToTopic is
 	// typically the device ID.
 	responseTopic := replyToTopic
@@ -265,17 +265,17 @@
 		var protoRequest *ic.InterContainerMessage
 
 		// Encode the request
-		protoRequest, err = encodeRequest(ctx, rpc, toTopic, responseTopic, key, kvArgs...)
+		protoRequest, err = encodeRequest(rpc, toTopic, responseTopic, key, kvArgs...)
 		if err != nil {
-			logger.Warnw(ctx, "cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+			logger.Warnw("cannot-format-request", log.Fields{"rpc": rpc, "error": err})
 			chnl <- NewResponse(RpcFormattingError, err, nil)
 			return
 		}
 
 		// Subscribe for response, if needed, before sending request
 		var ch <-chan *ic.InterContainerMessage
-		if ch, err = kp.subscribeForResponse(ctx, *responseTopic, protoRequest.Header.Id); err != nil {
-			logger.Errorw(ctx, "failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+		if ch, err = kp.subscribeForResponse(*responseTopic, protoRequest.Header.Id); err != nil {
+			logger.Errorw("failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
 			chnl <- NewResponse(RpcTransportError, err, nil)
 			return
 		}
@@ -283,10 +283,10 @@
 		// Send request - if the topic is formatted with a device Id then we will send the request using a
 		// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
 		// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
-		logger.Debugw(ctx, "sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+		logger.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
 
 		// if the message is not sent on kafka publish an event an close the channel
-		if err = kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err != nil {
+		if err = kp.kafkaClient.Send(protoRequest, toTopic, key); err != nil {
 			chnl <- NewResponse(RpcTransportError, err, nil)
 			return
 		}
@@ -299,8 +299,8 @@
 
 		defer func() {
 			// Remove the subscription for a response on return
-			if err := kp.unSubscribeForResponse(ctx, protoRequest.Header.Id); err != nil {
-				logger.Warnw(ctx, "invoke-async-rpc-unsubscriber-for-response-failed", log.Fields{"err": err})
+			if err := kp.unSubscribeForResponse(protoRequest.Header.Id); err != nil {
+				logger.Warnw("invoke-async-rpc-unsubscriber-for-response-failed", log.Fields{"err": err})
 			}
 		}()
 
@@ -308,11 +308,11 @@
 		select {
 		case msg, ok := <-ch:
 			if !ok {
-				logger.Warnw(ctx, "channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				logger.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
 				chnl <- NewResponse(RpcTransportError, status.Error(codes.Aborted, "channel closed"), nil)
 			}
-			logger.Debugw(ctx, "received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
-			if responseBody, err := decodeResponse(ctx, msg); err != nil {
+			logger.Debugw("received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			if responseBody, err := decodeResponse(msg); err != nil {
 				chnl <- NewResponse(RpcReply, err, nil)
 			} else {
 				if responseBody.Success {
@@ -328,12 +328,12 @@
 				}
 			}
 		case <-ctx.Done():
-			logger.Errorw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			logger.Errorw("context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
 			err := status.Error(codes.DeadlineExceeded, ctx.Err().Error())
 			chnl <- NewResponse(RpcTimeout, err, nil)
 		case <-kp.doneCh:
 			chnl <- NewResponse(RpcSystemClosing, nil, nil)
-			logger.Warnw(ctx, "received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+			logger.Warnw("received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
 		}
 	}()
 	return chnl
@@ -351,9 +351,9 @@
 	}
 
 	// Encode the request
-	protoRequest, err := encodeRequest(ctx, rpc, toTopic, responseTopic, key, kvArgs...)
+	protoRequest, err := encodeRequest(rpc, toTopic, responseTopic, key, kvArgs...)
 	if err != nil {
-		logger.Warnw(ctx, "cannot-format-request", log.Fields{"rpc": rpc, "error": err})
+		logger.Warnw("cannot-format-request", log.Fields{"rpc": rpc, "error": err})
 		return false, nil
 	}
 
@@ -361,8 +361,8 @@
 	var ch <-chan *ic.InterContainerMessage
 	if waitForResponse {
 		var err error
-		if ch, err = kp.subscribeForResponse(ctx, *responseTopic, protoRequest.Header.Id); err != nil {
-			logger.Errorw(ctx, "failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
+		if ch, err = kp.subscribeForResponse(*responseTopic, protoRequest.Header.Id); err != nil {
+			logger.Errorw("failed-to-subscribe-for-response", log.Fields{"error": err, "toTopic": toTopic.Name})
 		}
 	}
 
@@ -370,10 +370,10 @@
 	// specific key, hence ensuring a single partition is used to publish the request.  This ensures that the
 	// subscriber on that topic will receive the request in the order it was sent.  The key used is the deviceId.
 	//key := GetDeviceIdFromTopic(*toTopic)
-	logger.Debugw(ctx, "sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
+	logger.Debugw("sending-msg", log.Fields{"rpc": rpc, "toTopic": toTopic, "replyTopic": responseTopic, "key": key, "xId": protoRequest.Header.Id})
 	go func() {
-		if err := kp.kafkaClient.Send(ctx, protoRequest, toTopic, key); err != nil {
-			logger.Errorw(ctx, "send-failed", log.Fields{
+		if err := kp.kafkaClient.Send(protoRequest, toTopic, key); err != nil {
+			logger.Errorw("send-failed", log.Fields{
 				"topic": toTopic,
 				"key":   key,
 				"error": err})
@@ -394,8 +394,8 @@
 		// Wait for response as well as timeout or cancellation
 		// Remove the subscription for a response on return
 		defer func() {
-			if err := kp.unSubscribeForResponse(ctx, protoRequest.Header.Id); err != nil {
-				logger.Errorw(ctx, "response-unsubscribe-failed", log.Fields{
+			if err := kp.unSubscribeForResponse(protoRequest.Header.Id); err != nil {
+				logger.Errorw("response-unsubscribe-failed", log.Fields{
 					"id":    protoRequest.Header.Id,
 					"error": err})
 			}
@@ -403,7 +403,7 @@
 		select {
 		case msg, ok := <-ch:
 			if !ok {
-				logger.Warnw(ctx, "channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
+				logger.Warnw("channel-closed", log.Fields{"rpc": rpc, "replyTopic": replyToTopic.Name})
 				protoError := &ic.Error{Reason: "channel-closed"}
 				var marshalledArg *any.Any
 				if marshalledArg, err = ptypes.MarshalAny(protoError); err != nil {
@@ -411,16 +411,16 @@
 				}
 				return false, marshalledArg
 			}
-			logger.Debugw(ctx, "received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
+			logger.Debugw("received-response", log.Fields{"rpc": rpc, "msgHeader": msg.Header})
 			var responseBody *ic.InterContainerResponseBody
 			var err error
-			if responseBody, err = decodeResponse(ctx, msg); err != nil {
-				logger.Errorw(ctx, "decode-response-error", log.Fields{"error": err})
+			if responseBody, err = decodeResponse(msg); err != nil {
+				logger.Errorw("decode-response-error", log.Fields{"error": err})
 				// FIXME we should return something
 			}
 			return responseBody.Success, responseBody.Result
 		case <-ctx.Done():
-			logger.Debugw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
+			logger.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": ctx.Err()})
 			//	 pack the error as proto any type
 			protoError := &ic.Error{Reason: ctx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
 
@@ -430,7 +430,7 @@
 			}
 			return false, marshalledArg
 		case <-childCtx.Done():
-			logger.Debugw(ctx, "context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
+			logger.Debugw("context-cancelled", log.Fields{"rpc": rpc, "ctx": childCtx.Err()})
 			//	 pack the error as proto any type
 			protoError := &ic.Error{Reason: childCtx.Err().Error(), Code: ic.ErrorCode_DEADLINE_EXCEEDED}
 
@@ -440,7 +440,7 @@
 			}
 			return false, marshalledArg
 		case <-kp.doneCh:
-			logger.Infow(ctx, "received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
+			logger.Infow("received-exit-signal", log.Fields{"toTopic": toTopic.Name, "rpc": rpc})
 			return true, nil
 		}
 	}
@@ -449,55 +449,55 @@
 
 // SubscribeWithRequestHandlerInterface allows a caller to assign a target object to be invoked automatically
 // when a message is received on a given topic
-func (kp *interContainerProxy) SubscribeWithRequestHandlerInterface(ctx context.Context, topic Topic, handler interface{}) error {
+func (kp *interContainerProxy) SubscribeWithRequestHandlerInterface(topic Topic, handler interface{}) error {
 
 	// Subscribe to receive messages for that topic
 	var ch <-chan *ic.InterContainerMessage
 	var err error
-	if ch, err = kp.kafkaClient.Subscribe(ctx, &topic); err != nil {
+	if ch, err = kp.kafkaClient.Subscribe(&topic); err != nil {
 		//if ch, err = kp.Subscribe(topic); err != nil {
-		logger.Errorw(ctx, "failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+		logger.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
 		return err
 	}
 
 	kp.defaultRequestHandlerInterface = handler
 	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: handler, ch: ch})
 	// Launch a go routine to receive and process kafka messages
-	go kp.waitForMessages(ctx, ch, topic, handler)
+	go kp.waitForMessages(ch, topic, handler)
 
 	return nil
 }
 
 // SubscribeWithDefaultRequestHandler allows a caller to add a topic to an existing target object to be invoked automatically
 // when a message is received on a given topic.  So far there is only 1 target registered per microservice
-func (kp *interContainerProxy) SubscribeWithDefaultRequestHandler(ctx context.Context, topic Topic, initialOffset int64) error {
+func (kp *interContainerProxy) SubscribeWithDefaultRequestHandler(topic Topic, initialOffset int64) error {
 	// Subscribe to receive messages for that topic
 	var ch <-chan *ic.InterContainerMessage
 	var err error
-	if ch, err = kp.kafkaClient.Subscribe(ctx, &topic, &KVArg{Key: Offset, Value: initialOffset}); err != nil {
-		logger.Errorw(ctx, "failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
+	if ch, err = kp.kafkaClient.Subscribe(&topic, &KVArg{Key: Offset, Value: initialOffset}); err != nil {
+		logger.Errorw("failed-to-subscribe", log.Fields{"error": err, "topic": topic.Name})
 		return err
 	}
 	kp.addToTopicRequestHandlerChannelMap(topic.Name, &requestHandlerChannel{requesthandlerInterface: kp.defaultRequestHandlerInterface, ch: ch})
 
 	// Launch a go routine to receive and process kafka messages
-	go kp.waitForMessages(ctx, ch, topic, kp.defaultRequestHandlerInterface)
+	go kp.waitForMessages(ch, topic, kp.defaultRequestHandlerInterface)
 
 	return nil
 }
 
-func (kp *interContainerProxy) UnSubscribeFromRequestHandler(ctx context.Context, topic Topic) error {
-	return kp.deleteFromTopicRequestHandlerChannelMap(ctx, topic.Name)
+func (kp *interContainerProxy) UnSubscribeFromRequestHandler(topic Topic) error {
+	return kp.deleteFromTopicRequestHandlerChannelMap(topic.Name)
 }
 
-func (kp *interContainerProxy) deleteFromTopicResponseChannelMap(ctx context.Context, topic string) error {
+func (kp *interContainerProxy) deleteFromTopicResponseChannelMap(topic string) error {
 	kp.lockTopicResponseChannelMap.Lock()
 	defer kp.lockTopicResponseChannelMap.Unlock()
 	if _, exist := kp.topicToResponseChannelMap[topic]; exist {
 		// Unsubscribe to this topic first - this will close the subscribed channel
 		var err error
-		if err = kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
-			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic})
+		if err = kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic})
 		}
 		delete(kp.topicToResponseChannelMap, topic)
 		return err
@@ -507,16 +507,16 @@
 }
 
 // nolint: unused
-func (kp *interContainerProxy) deleteAllTopicResponseChannelMap(ctx context.Context) error {
-	logger.Debug(ctx, "delete-all-topic-response-channel")
+func (kp *interContainerProxy) deleteAllTopicResponseChannelMap() error {
+	logger.Debug("delete-all-topic-response-channel")
 	kp.lockTopicResponseChannelMap.Lock()
 	defer kp.lockTopicResponseChannelMap.Unlock()
 	var unsubscribeFailTopics []string
 	for topic := range kp.topicToResponseChannelMap {
 		// Unsubscribe to this topic first - this will close the subscribed channel
-		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
+		if err := kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToResponseChannelMap[topic]); err != nil {
 			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
-			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
 			// Do not return. Continue to try to unsubscribe to other topics.
 		} else {
 			// Only delete from channel map if successfully unsubscribed.
@@ -537,12 +537,12 @@
 	}
 }
 
-func (kp *interContainerProxy) deleteFromTopicRequestHandlerChannelMap(ctx context.Context, topic string) error {
+func (kp *interContainerProxy) deleteFromTopicRequestHandlerChannelMap(topic string) error {
 	kp.lockTopicRequestHandlerChannelMap.Lock()
 	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
 	if _, exist := kp.topicToRequestHandlerChannelMap[topic]; exist {
 		// Close the kafka client client first by unsubscribing to this topic
-		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+		if err := kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
 			return err
 		}
 		delete(kp.topicToRequestHandlerChannelMap, topic)
@@ -553,16 +553,16 @@
 }
 
 // nolint: unused
-func (kp *interContainerProxy) deleteAllTopicRequestHandlerChannelMap(ctx context.Context) error {
-	logger.Debug(ctx, "delete-all-topic-request-channel")
+func (kp *interContainerProxy) deleteAllTopicRequestHandlerChannelMap() error {
+	logger.Debug("delete-all-topic-request-channel")
 	kp.lockTopicRequestHandlerChannelMap.Lock()
 	defer kp.lockTopicRequestHandlerChannelMap.Unlock()
 	var unsubscribeFailTopics []string
 	for topic := range kp.topicToRequestHandlerChannelMap {
 		// Close the kafka client client first by unsubscribing to this topic
-		if err := kp.kafkaClient.UnSubscribe(ctx, &Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
+		if err := kp.kafkaClient.UnSubscribe(&Topic{Name: topic}, kp.topicToRequestHandlerChannelMap[topic].ch); err != nil {
 			unsubscribeFailTopics = append(unsubscribeFailTopics, topic)
-			logger.Errorw(ctx, "unsubscribing-error", log.Fields{"topic": topic, "error": err})
+			logger.Errorw("unsubscribing-error", log.Fields{"topic": topic, "error": err})
 			// Do not return. Continue to try to unsubscribe to other topics.
 		} else {
 			// Only delete from channel map if successfully unsubscribed.
@@ -605,8 +605,8 @@
 }
 
 // nolint: unused
-func (kp *interContainerProxy) deleteAllTransactionIdToChannelMap(ctx context.Context) {
-	logger.Debug(ctx, "delete-all-transaction-id-channel-map")
+func (kp *interContainerProxy) deleteAllTransactionIdToChannelMap() {
+	logger.Debug("delete-all-transaction-id-channel-map")
 	kp.lockTransactionIdToChannelMap.Lock()
 	defer kp.lockTransactionIdToChannelMap.Unlock()
 	for key, value := range kp.transactionIdToChannelMap {
@@ -615,27 +615,27 @@
 	}
 }
 
-func (kp *interContainerProxy) DeleteTopic(ctx context.Context, topic Topic) error {
+func (kp *interContainerProxy) DeleteTopic(topic Topic) error {
 	// If we have any consumers on that topic we need to close them
-	if err := kp.deleteFromTopicResponseChannelMap(ctx, topic.Name); err != nil {
-		logger.Errorw(ctx, "delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
+	if err := kp.deleteFromTopicResponseChannelMap(topic.Name); err != nil {
+		logger.Errorw("delete-from-topic-responsechannelmap-failed", log.Fields{"error": err})
 	}
-	if err := kp.deleteFromTopicRequestHandlerChannelMap(ctx, topic.Name); err != nil {
-		logger.Errorw(ctx, "delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
+	if err := kp.deleteFromTopicRequestHandlerChannelMap(topic.Name); err != nil {
+		logger.Errorw("delete-from-topic-requesthandlerchannelmap-failed", log.Fields{"error": err})
 	}
 	kp.deleteTopicTransactionIdToChannelMap(topic.Name)
 
-	return kp.kafkaClient.DeleteTopic(ctx, &topic)
+	return kp.kafkaClient.DeleteTopic(&topic)
 }
 
-func encodeReturnedValue(ctx context.Context, returnedVal interface{}) (*any.Any, error) {
+func encodeReturnedValue(returnedVal interface{}) (*any.Any, error) {
 	// Encode the response argument - needs to be a proto message
 	if returnedVal == nil {
 		return nil, nil
 	}
 	protoValue, ok := returnedVal.(proto.Message)
 	if !ok {
-		logger.Warnw(ctx, "response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
+		logger.Warnw("response-value-not-proto-message", log.Fields{"error": ok, "returnVal": returnedVal})
 		err := errors.New("response-value-not-proto-message")
 		return nil, err
 	}
@@ -644,13 +644,13 @@
 	var marshalledReturnedVal *any.Any
 	var err error
 	if marshalledReturnedVal, err = ptypes.MarshalAny(protoValue); err != nil {
-		logger.Warnw(ctx, "cannot-marshal-returned-val", log.Fields{"error": err})
+		logger.Warnw("cannot-marshal-returned-val", log.Fields{"error": err})
 		return nil, err
 	}
 	return marshalledReturnedVal, nil
 }
 
-func encodeDefaultFailedResponse(ctx context.Context, request *ic.InterContainerMessage) *ic.InterContainerMessage {
+func encodeDefaultFailedResponse(request *ic.InterContainerMessage) *ic.InterContainerMessage {
 	responseHeader := &ic.Header{
 		Id:        request.Header.Id,
 		Type:      ic.MessageType_RESPONSE,
@@ -666,7 +666,7 @@
 	var err error
 	// Error should never happen here
 	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
-		logger.Warnw(ctx, "cannot-marshal-failed-response-body", log.Fields{"error": err})
+		logger.Warnw("cannot-marshal-failed-response-body", log.Fields{"error": err})
 	}
 
 	return &ic.InterContainerMessage{
@@ -678,8 +678,8 @@
 
 //formatRequest formats a request to send over kafka and returns an InterContainerMessage message on success
 //or an error on failure
-func encodeResponse(ctx context.Context, request *ic.InterContainerMessage, success bool, returnedValues ...interface{}) (*ic.InterContainerMessage, error) {
-	//logger.Debugw(ctx, "encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
+func encodeResponse(request *ic.InterContainerMessage, success bool, returnedValues ...interface{}) (*ic.InterContainerMessage, error) {
+	//logger.Debugw("encodeResponse", log.Fields{"success": success, "returnedValues": returnedValues})
 	responseHeader := &ic.Header{
 		Id:        request.Header.Id,
 		Type:      ic.MessageType_RESPONSE,
@@ -695,8 +695,8 @@
 
 	// for now we support only 1 returned value - (excluding the error)
 	if len(returnedValues) > 0 {
-		if marshalledReturnedVal, err = encodeReturnedValue(ctx, returnedValues[0]); err != nil {
-			logger.Warnw(ctx, "cannot-marshal-response-body", log.Fields{"error": err})
+		if marshalledReturnedVal, err = encodeReturnedValue(returnedValues[0]); err != nil {
+			logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
 		}
 	}
 
@@ -708,7 +708,7 @@
 	// Marshal the response body
 	var marshalledResponseBody *any.Any
 	if marshalledResponseBody, err = ptypes.MarshalAny(responseBody); err != nil {
-		logger.Warnw(ctx, "cannot-marshal-response-body", log.Fields{"error": err})
+		logger.Warnw("cannot-marshal-response-body", log.Fields{"error": err})
 		return nil, err
 	}
 
@@ -718,7 +718,7 @@
 	}, nil
 }
 
-func CallFuncByName(ctx context.Context, myClass interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) {
+func CallFuncByName(myClass interface{}, funcName string, params ...interface{}) (out []reflect.Value, err error) {
 	myClassValue := reflect.ValueOf(myClass)
 	// Capitalize the first letter in the funcName to workaround the first capital letters required to
 	// invoke a function from a different package
@@ -727,16 +727,15 @@
 	if !m.IsValid() {
 		return make([]reflect.Value, 0), fmt.Errorf("method-not-found \"%s\"", funcName)
 	}
-	in := make([]reflect.Value, len(params)+1)
-	in[0] = reflect.ValueOf(ctx)
+	in := make([]reflect.Value, len(params))
 	for i, param := range params {
-		in[i+1] = reflect.ValueOf(param)
+		in[i] = reflect.ValueOf(param)
 	}
 	out = m.Call(in)
 	return
 }
 
-func (kp *interContainerProxy) addTransactionId(ctx context.Context, transactionId string, currentArgs []*ic.Argument) []*ic.Argument {
+func (kp *interContainerProxy) addTransactionId(transactionId string, currentArgs []*ic.Argument) []*ic.Argument {
 	arg := &KVArg{
 		Key:   TransactionKey,
 		Value: &ic.StrType{Val: transactionId},
@@ -745,7 +744,7 @@
 	var marshalledArg *any.Any
 	var err error
 	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: transactionId}); err != nil {
-		logger.Warnw(ctx, "cannot-add-transactionId", log.Fields{"error": err})
+		logger.Warnw("cannot-add-transactionId", log.Fields{"error": err})
 		return currentArgs
 	}
 	protoArg := &ic.Argument{
@@ -755,11 +754,11 @@
 	return append(currentArgs, protoArg)
 }
 
-func (kp *interContainerProxy) addFromTopic(ctx context.Context, fromTopic string, currentArgs []*ic.Argument) []*ic.Argument {
+func (kp *interContainerProxy) addFromTopic(fromTopic string, currentArgs []*ic.Argument) []*ic.Argument {
 	var marshalledArg *any.Any
 	var err error
 	if marshalledArg, err = ptypes.MarshalAny(&ic.StrType{Val: fromTopic}); err != nil {
-		logger.Warnw(ctx, "cannot-add-transactionId", log.Fields{"error": err})
+		logger.Warnw("cannot-add-transactionId", log.Fields{"error": err})
 		return currentArgs
 	}
 	protoArg := &ic.Argument{
@@ -769,7 +768,7 @@
 	return append(currentArgs, protoArg)
 }
 
-func (kp *interContainerProxy) handleMessage(ctx context.Context, msg *ic.InterContainerMessage, targetInterface interface{}) {
+func (kp *interContainerProxy) handleMessage(msg *ic.InterContainerMessage, targetInterface interface{}) {
 
 	// First extract the header to know whether this is a request - responses are handled by a different handler
 	if msg.Header.Type == ic.MessageType_REQUEST {
@@ -779,21 +778,21 @@
 		// Get the request body
 		requestBody := &ic.InterContainerRequestBody{}
 		if err = ptypes.UnmarshalAny(msg.Body, requestBody); err != nil {
-			logger.Warnw(ctx, "cannot-unmarshal-request", log.Fields{"error": err})
+			logger.Warnw("cannot-unmarshal-request", log.Fields{"error": err})
 		} else {
-			logger.Debugw(ctx, "received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
+			logger.Debugw("received-request", log.Fields{"rpc": requestBody.Rpc, "header": msg.Header})
 			// let the callee unpack the arguments as its the only one that knows the real proto type
 			// Augment the requestBody with the message Id as it will be used in scenarios where cores
 			// are set in pairs and competing
-			requestBody.Args = kp.addTransactionId(ctx, msg.Header.Id, requestBody.Args)
+			requestBody.Args = kp.addTransactionId(msg.Header.Id, requestBody.Args)
 
 			// Augment the requestBody with the From topic name as it will be used in scenarios where a container
 			// needs to send an unsollicited message to the currently requested container
-			requestBody.Args = kp.addFromTopic(ctx, msg.Header.FromTopic, requestBody.Args)
+			requestBody.Args = kp.addFromTopic(msg.Header.FromTopic, requestBody.Args)
 
-			out, err = CallFuncByName(ctx, targetInterface, requestBody.Rpc, requestBody.Args)
+			out, err = CallFuncByName(targetInterface, requestBody.Rpc, requestBody.Args)
 			if err != nil {
-				logger.Warn(ctx, err)
+				logger.Warn(err)
 			}
 		}
 		// Response required?
@@ -813,7 +812,7 @@
 				if out[lastIndex].Interface() != nil { // Error
 					if retError, ok := out[lastIndex].Interface().(error); ok {
 						if retError.Error() == ErrorTransactionNotAcquired.Error() {
-							logger.Debugw(ctx, "Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
+							logger.Debugw("Ignoring request", log.Fields{"error": retError, "txId": msg.Header.Id})
 							return // Ignore - process is in competing mode and ignored transaction
 						}
 						returnError = &ic.Error{Reason: retError.Error()}
@@ -823,12 +822,12 @@
 						returnedValues = append(returnedValues, returnError)
 					}
 				} else if len(out) == 2 && reflect.ValueOf(out[0].Interface()).IsValid() && reflect.ValueOf(out[0].Interface()).IsNil() {
-					logger.Warnw(ctx, "Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
+					logger.Warnw("Unexpected response of (nil,nil)", log.Fields{"txId": msg.Header.Id})
 					return // Ignore - should not happen
 				} else { // Non-error case
 					success = true
 					for idx, val := range out {
-						//logger.Debugw(ctx, "returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
+						//logger.Debugw("returned-api-response-loop", log.Fields{"idx": idx, "val": val.Interface()})
 						if idx != lastIndex {
 							returnedValues = append(returnedValues, val.Interface())
 						}
@@ -837,9 +836,9 @@
 			}
 
 			var icm *ic.InterContainerMessage
-			if icm, err = encodeResponse(ctx, msg, success, returnedValues...); err != nil {
-				logger.Warnw(ctx, "error-encoding-response-returning-failure-result", log.Fields{"error": err})
-				icm = encodeDefaultFailedResponse(ctx, msg)
+			if icm, err = encodeResponse(msg, success, returnedValues...); err != nil {
+				logger.Warnw("error-encoding-response-returning-failure-result", log.Fields{"error": err})
+				icm = encodeDefaultFailedResponse(msg)
 			}
 			// To preserve ordering of messages, all messages to a given topic are sent to the same partition
 			// by providing a message key.   The key is encoded in the topic name.  If the deviceId is not
@@ -847,11 +846,11 @@
 			// partitions.
 			replyTopic := &Topic{Name: msg.Header.FromTopic}
 			key := msg.Header.KeyTopic
-			logger.Debugw(ctx, "sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
+			logger.Debugw("sending-response-to-kafka", log.Fields{"rpc": requestBody.Rpc, "header": icm.Header, "key": key})
 			// TODO: handle error response.
 			go func() {
-				if err := kp.kafkaClient.Send(ctx, icm, replyTopic, key); err != nil {
-					logger.Errorw(ctx, "send-reply-failed", log.Fields{
+				if err := kp.kafkaClient.Send(icm, replyTopic, key); err != nil {
+					logger.Errorw("send-reply-failed", log.Fields{
 						"topic": replyTopic,
 						"key":   key,
 						"error": err})
@@ -859,26 +858,26 @@
 			}()
 		}
 	} else if msg.Header.Type == ic.MessageType_RESPONSE {
-		logger.Debugw(ctx, "response-received", log.Fields{"msg-header": msg.Header})
-		go kp.dispatchResponse(ctx, msg)
+		logger.Debugw("response-received", log.Fields{"msg-header": msg.Header})
+		go kp.dispatchResponse(msg)
 	} else {
-		logger.Warnw(ctx, "unsupported-message-received", log.Fields{"msg-header": msg.Header})
+		logger.Warnw("unsupported-message-received", log.Fields{"msg-header": msg.Header})
 	}
 }
 
-func (kp *interContainerProxy) waitForMessages(ctx context.Context, ch <-chan *ic.InterContainerMessage, topic Topic, targetInterface interface{}) {
+func (kp *interContainerProxy) waitForMessages(ch <-chan *ic.InterContainerMessage, topic Topic, targetInterface interface{}) {
 	//	Wait for messages
 	for msg := range ch {
-		//logger.Debugw(ctx, "request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
-		go kp.handleMessage(ctx, msg, targetInterface)
+		//logger.Debugw("request-received", log.Fields{"msg": msg, "topic": topic.Name, "target": targetInterface})
+		go kp.handleMessage(msg, targetInterface)
 	}
 }
 
-func (kp *interContainerProxy) dispatchResponse(ctx context.Context, msg *ic.InterContainerMessage) {
+func (kp *interContainerProxy) dispatchResponse(msg *ic.InterContainerMessage) {
 	kp.lockTransactionIdToChannelMap.RLock()
 	defer kp.lockTransactionIdToChannelMap.RUnlock()
 	if _, exist := kp.transactionIdToChannelMap[msg.Header.Id]; !exist {
-		logger.Debugw(ctx, "no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
+		logger.Debugw("no-waiting-channel", log.Fields{"transaction": msg.Header.Id})
 		return
 	}
 	kp.transactionIdToChannelMap[msg.Header.Id].ch <- msg
@@ -888,8 +887,8 @@
 // This method is built to prevent all subscribers to receive all messages as is the case of the Subscribe
 // API. There is one response channel waiting for kafka messages before dispatching the message to the
 // corresponding waiting channel
-func (kp *interContainerProxy) subscribeForResponse(ctx context.Context, topic Topic, trnsId string) (chan *ic.InterContainerMessage, error) {
-	logger.Debugw(ctx, "subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
+func (kp *interContainerProxy) subscribeForResponse(topic Topic, trnsId string) (chan *ic.InterContainerMessage, error) {
+	logger.Debugw("subscribeForResponse", log.Fields{"topic": topic.Name, "trnsid": trnsId})
 
 	// Create a specific channel for this consumers.  We cannot use the channel from the kafkaclient as it will
 	// broadcast any message for this topic to all channels waiting on it.
@@ -900,27 +899,27 @@
 	return ch, nil
 }
 
-func (kp *interContainerProxy) unSubscribeForResponse(ctx context.Context, trnsId string) error {
-	logger.Debugw(ctx, "unsubscribe-for-response", log.Fields{"trnsId": trnsId})
+func (kp *interContainerProxy) unSubscribeForResponse(trnsId string) error {
+	logger.Debugw("unsubscribe-for-response", log.Fields{"trnsId": trnsId})
 	kp.deleteFromTransactionIdToChannelMap(trnsId)
 	return nil
 }
 
-func (kp *interContainerProxy) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
-	return kp.kafkaClient.EnableLivenessChannel(ctx, enable)
+func (kp *interContainerProxy) EnableLivenessChannel(enable bool) chan bool {
+	return kp.kafkaClient.EnableLivenessChannel(enable)
 }
 
-func (kp *interContainerProxy) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
-	return kp.kafkaClient.EnableHealthinessChannel(ctx, enable)
+func (kp *interContainerProxy) EnableHealthinessChannel(enable bool) chan bool {
+	return kp.kafkaClient.EnableHealthinessChannel(enable)
 }
 
-func (kp *interContainerProxy) SendLiveness(ctx context.Context) error {
-	return kp.kafkaClient.SendLiveness(ctx)
+func (kp *interContainerProxy) SendLiveness() error {
+	return kp.kafkaClient.SendLiveness()
 }
 
 //formatRequest formats a request to send over kafka and returns an InterContainerMessage message on success
 //or an error on failure
-func encodeRequest(ctx context.Context, rpc string, toTopic *Topic, replyTopic *Topic, key string, kvArgs ...*KVArg) (*ic.InterContainerMessage, error) {
+func encodeRequest(rpc string, toTopic *Topic, replyTopic *Topic, key string, kvArgs ...*KVArg) (*ic.InterContainerMessage, error) {
 	requestHeader := &ic.Header{
 		Id:        uuid.New().String(),
 		Type:      ic.MessageType_REQUEST,
@@ -945,12 +944,12 @@
 		// ascertain the value interface type is a proto.Message
 		protoValue, ok := arg.Value.(proto.Message)
 		if !ok {
-			logger.Warnw(ctx, "argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
+			logger.Warnw("argument-value-not-proto-message", log.Fields{"error": ok, "Value": arg.Value})
 			err := errors.New("argument-value-not-proto-message")
 			return nil, err
 		}
 		if marshalledArg, err = ptypes.MarshalAny(protoValue); err != nil {
-			logger.Warnw(ctx, "cannot-marshal-request", log.Fields{"error": err})
+			logger.Warnw("cannot-marshal-request", log.Fields{"error": err})
 			return nil, err
 		}
 		protoArg := &ic.Argument{
@@ -963,7 +962,7 @@
 	var marshalledData *any.Any
 	var err error
 	if marshalledData, err = ptypes.MarshalAny(requestBody); err != nil {
-		logger.Warnw(ctx, "cannot-marshal-request", log.Fields{"error": err})
+		logger.Warnw("cannot-marshal-request", log.Fields{"error": err})
 		return nil, err
 	}
 	request := &ic.InterContainerMessage{
@@ -973,14 +972,14 @@
 	return request, nil
 }
 
-func decodeResponse(ctx context.Context, response *ic.InterContainerMessage) (*ic.InterContainerResponseBody, error) {
+func decodeResponse(response *ic.InterContainerMessage) (*ic.InterContainerResponseBody, error) {
 	//	Extract the message body
 	responseBody := ic.InterContainerResponseBody{}
 	if err := ptypes.UnmarshalAny(response.Body, &responseBody); err != nil {
-		logger.Warnw(ctx, "cannot-unmarshal-response", log.Fields{"error": err})
+		logger.Warnw("cannot-unmarshal-response", log.Fields{"error": err})
 		return nil, err
 	}
-	//logger.Debugw(ctx, "response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
+	//logger.Debugw("response-decoded-successfully", log.Fields{"response-status": &responseBody.Success})
 
 	return &responseBody, nil
 
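Note: the reverted CallFuncByName above no longer injects a context.Context into the reflected argument list; handler methods are called with the RPC arguments exactly as received. A minimal, self-contained sketch of that dispatch style (the handler type, method and RPC name below are hypothetical, and the helper is a simplified stand-in for the library's own):

    package main

    import (
        "fmt"
        "reflect"
        "strings"
    )

    // exampleHandler stands in for a request-handler target; after the
    // revert its exported methods receive only the RPC arguments.
    type exampleHandler struct{}

    func (h *exampleHandler) GetDevice(id string) (string, error) {
        return "device-" + id, nil
    }

    // callFuncByName mimics the reverted helper: capitalize the first letter
    // of the RPC name, look the method up via reflection and call it with the
    // parameters as-is (no context prepended).
    func callFuncByName(target interface{}, funcName string, params ...interface{}) ([]reflect.Value, error) {
        m := reflect.ValueOf(target).MethodByName(strings.ToUpper(funcName[:1]) + funcName[1:])
        if !m.IsValid() {
            return nil, fmt.Errorf("method-not-found %q", funcName)
        }
        in := make([]reflect.Value, len(params))
        for i, p := range params {
            in[i] = reflect.ValueOf(p)
        }
        return m.Call(in), nil
    }

    func main() {
        out, err := callFuncByName(&exampleHandler{}, "getDevice", "onu-1")
        if err != nil {
            panic(err)
        }
        fmt.Println(out[0].Interface(), out[1].Interface()) // device-onu-1 <nil>
    }
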
diff --git a/pkg/kafka/kafka_inter_container_library_test.go b/pkg/kafka/kafka_inter_container_library_test.go
index 8c88750..09286ad 100644
--- a/pkg/kafka/kafka_inter_container_library_test.go
+++ b/pkg/kafka/kafka_inter_container_library_test.go
@@ -16,7 +16,6 @@
 package kafka
 
 import (
-	"context"
 	"github.com/stretchr/testify/assert"
 	"testing"
 )
@@ -74,7 +73,7 @@
 		MsgClient(client),
 	)
 
-	ch := probe.EnableLivenessChannel(context.Background(), true)
+	ch := probe.EnableLivenessChannel(true)
 
 	// The channel should have one "true" message on it
 	assert.NotEmpty(t, ch)
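
For context, a hypothetical monitor built on the reverted proxy API, assuming the exported kafka.InterContainerProxy interface exposes the same context-free EnableLivenessChannel/SendLiveness signatures shown above; everything else here is illustrative:

    package example

    import "github.com/opencord/voltha-lib-go/v3/pkg/kafka"

    // monitorLiveness drains the liveness channel and, whenever Kafka is
    // reported down, fires a probe so the client re-checks connectivity.
    func monitorLiveness(kp kafka.InterContainerProxy) {
        for alive := range kp.EnableLivenessChannel(true) {
            if !alive {
                // Best effort; the next producer success/error updates the channel.
                _ = kp.SendLiveness()
            }
        }
    }
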
diff --git a/pkg/kafka/sarama_client.go b/pkg/kafka/sarama_client.go
index 87c7ce4..581cf49 100755
--- a/pkg/kafka/sarama_client.go
+++ b/pkg/kafka/sarama_client.go
@@ -231,8 +231,8 @@
 	return client
 }
 
-func (sc *SaramaClient) Start(ctx context.Context) error {
-	logger.Info(ctx, "Starting-kafka-sarama-client")
+func (sc *SaramaClient) Start() error {
+	logger.Info("Starting-kafka-sarama-client")
 
 	// Create the Done channel
 	sc.doneCh = make(chan int, 1)
@@ -242,26 +242,26 @@
 	// Add a cleanup in case of failure to startup
 	defer func() {
 		if err != nil {
-			sc.Stop(ctx)
+			sc.Stop()
 		}
 	}()
 
 	// Create the Cluster Admin
-	if err = sc.createClusterAdmin(ctx); err != nil {
-		logger.Errorw(ctx, "Cannot-create-cluster-admin", log.Fields{"error": err})
+	if err = sc.createClusterAdmin(); err != nil {
+		logger.Errorw("Cannot-create-cluster-admin", log.Fields{"error": err})
 		return err
 	}
 
 	// Create the Publisher
-	if err := sc.createPublisher(ctx); err != nil {
-		logger.Errorw(ctx, "Cannot-create-kafka-publisher", log.Fields{"error": err})
+	if err := sc.createPublisher(); err != nil {
+		logger.Errorw("Cannot-create-kafka-publisher", log.Fields{"error": err})
 		return err
 	}
 
 	if sc.consumerType == DefaultConsumerType {
 		// Create the master consumers
-		if err := sc.createConsumer(ctx); err != nil {
-			logger.Errorw(ctx, "Cannot-create-kafka-consumers", log.Fields{"error": err})
+		if err := sc.createConsumer(); err != nil {
+			logger.Errorw("Cannot-create-kafka-consumers", log.Fields{"error": err})
 			return err
 		}
 	}
@@ -269,15 +269,15 @@
 	// Create the topic to consumers/channel map
 	sc.topicToConsumerChannelMap = make(map[string]*consumerChannels)
 
-	logger.Info(ctx, "kafka-sarama-client-started")
+	logger.Info("kafka-sarama-client-started")
 
 	sc.started = true
 
 	return nil
 }
 
-func (sc *SaramaClient) Stop(ctx context.Context) {
-	logger.Info(ctx, "stopping-sarama-client")
+func (sc *SaramaClient) Stop() {
+	logger.Info("stopping-sarama-client")
 
 	sc.started = false
 
@@ -286,38 +286,38 @@
 
 	if sc.producer != nil {
 		if err := sc.producer.Close(); err != nil {
-			logger.Errorw(ctx, "closing-producer-failed", log.Fields{"error": err})
+			logger.Errorw("closing-producer-failed", log.Fields{"error": err})
 		}
 	}
 
 	if sc.consumer != nil {
 		if err := sc.consumer.Close(); err != nil {
-			logger.Errorw(ctx, "closing-partition-consumer-failed", log.Fields{"error": err})
+			logger.Errorw("closing-partition-consumer-failed", log.Fields{"error": err})
 		}
 	}
 
 	for key, val := range sc.groupConsumers {
-		logger.Debugw(ctx, "closing-group-consumer", log.Fields{"topic": key})
+		logger.Debugw("closing-group-consumer", log.Fields{"topic": key})
 		if err := val.Close(); err != nil {
-			logger.Errorw(ctx, "closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
+			logger.Errorw("closing-group-consumer-failed", log.Fields{"error": err, "topic": key})
 		}
 	}
 
 	if sc.cAdmin != nil {
 		if err := sc.cAdmin.Close(); err != nil {
-			logger.Errorw(ctx, "closing-cluster-admin-failed", log.Fields{"error": err})
+			logger.Errorw("closing-cluster-admin-failed", log.Fields{"error": err})
 		}
 	}
 
 	//TODO: Clear the consumers map
 	//sc.clearConsumerChannelMap()
 
-	logger.Info(ctx, "sarama-client-stopped")
+	logger.Info("sarama-client-stopped")
 }
 
 //createTopic is an internal function to create a topic on the Kafka Broker. No locking is required as
 // the invoking function must hold the lock
-func (sc *SaramaClient) createTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error {
+func (sc *SaramaClient) createTopic(topic *Topic, numPartition int, repFactor int) error {
 	// Set the topic details
 	topicDetail := &sarama.TopicDetail{}
 	topicDetail.NumPartitions = int32(numPartition)
@@ -329,29 +329,29 @@
 	if err := sc.cAdmin.CreateTopic(topic.Name, topicDetail, false); err != nil {
 		if err == sarama.ErrTopicAlreadyExists {
 			//	Not an error
-			logger.Debugw(ctx, "topic-already-exist", log.Fields{"topic": topic.Name})
+			logger.Debugw("topic-already-exist", log.Fields{"topic": topic.Name})
 			return nil
 		}
-		logger.Errorw(ctx, "create-topic-failure", log.Fields{"error": err})
+		logger.Errorw("create-topic-failure", log.Fields{"error": err})
 		return err
 	}
 	// TODO: Wait until the topic has been created.  No API is available in the Sarama clusterAdmin to
 	// do so.
-	logger.Debugw(ctx, "topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
+	logger.Debugw("topic-created", log.Fields{"topic": topic, "numPartition": numPartition, "replicationFactor": repFactor})
 	return nil
 }
 
 //CreateTopic is a public API to create a topic on the Kafka Broker.  It uses a lock on a specific topic to
 // ensure no two go routines are performing operations on the same topic
-func (sc *SaramaClient) CreateTopic(ctx context.Context, topic *Topic, numPartition int, repFactor int) error {
+func (sc *SaramaClient) CreateTopic(topic *Topic, numPartition int, repFactor int) error {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
-	return sc.createTopic(ctx, topic, numPartition, repFactor)
+	return sc.createTopic(topic, numPartition, repFactor)
 }
 
 //DeleteTopic removes a topic from the kafka Broker
-func (sc *SaramaClient) DeleteTopic(ctx context.Context, topic *Topic) error {
+func (sc *SaramaClient) DeleteTopic(topic *Topic) error {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
@@ -359,16 +359,16 @@
 	if err := sc.cAdmin.DeleteTopic(topic.Name); err != nil {
 		if err == sarama.ErrUnknownTopicOrPartition {
 			//	Not an error as does not exist
-			logger.Debugw(ctx, "topic-not-exist", log.Fields{"topic": topic.Name})
+			logger.Debugw("topic-not-exist", log.Fields{"topic": topic.Name})
 			return nil
 		}
-		logger.Errorw(ctx, "delete-topic-failed", log.Fields{"topic": topic, "error": err})
+		logger.Errorw("delete-topic-failed", log.Fields{"topic": topic, "error": err})
 		return err
 	}
 
 	// Clear the topic from the consumer channel.  This will also close any consumers listening on that topic.
-	if err := sc.clearTopicFromConsumerChannelMap(ctx, *topic); err != nil {
-		logger.Errorw(ctx, "failure-clearing-channels", log.Fields{"topic": topic, "error": err})
+	if err := sc.clearTopicFromConsumerChannelMap(*topic); err != nil {
+		logger.Errorw("failure-clearing-channels", log.Fields{"topic": topic, "error": err})
 		return err
 	}
 	return nil
@@ -376,18 +376,18 @@
 
 // Subscribe registers a caller to a topic. It returns a channel that the caller can use to receive
 // messages from that topic
-func (sc *SaramaClient) Subscribe(ctx context.Context, topic *Topic, kvArgs ...*KVArg) (<-chan *ic.InterContainerMessage, error) {
+func (sc *SaramaClient) Subscribe(topic *Topic, kvArgs ...*KVArg) (<-chan *ic.InterContainerMessage, error) {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
-	logger.Debugw(ctx, "subscribe", log.Fields{"topic": topic.Name})
+	logger.Debugw("subscribe", log.Fields{"topic": topic.Name})
 
 	// If a consumers already exist for that topic then resuse it
 	if consumerCh := sc.getConsumerChannel(topic); consumerCh != nil {
-		logger.Debugw(ctx, "topic-already-subscribed", log.Fields{"topic": topic.Name})
+		logger.Debugw("topic-already-subscribed", log.Fields{"topic": topic.Name})
 		// Create a channel specific for that consumers and add it to the consumers channel map
 		ch := make(chan *ic.InterContainerMessage)
-		sc.addChannelToConsumerChannelMap(ctx, topic, ch)
+		sc.addChannelToConsumerChannelMap(topic, ch)
 		return ch, nil
 	}
 
@@ -398,13 +398,13 @@
 	// Use the consumerType option to figure out the type of consumer to launch
 	if sc.consumerType == PartitionConsumer {
 		if sc.autoCreateTopic {
-			if err = sc.createTopic(ctx, topic, sc.numPartitions, sc.numReplicas); err != nil {
-				logger.Errorw(ctx, "create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
+			if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
+				logger.Errorw("create-topic-failure", log.Fields{"error": err, "topic": topic.Name})
 				return nil, err
 			}
 		}
-		if consumerListeningChannel, err = sc.setupPartitionConsumerChannel(ctx, topic, getOffset(kvArgs...)); err != nil {
-			logger.Warnw(ctx, "create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
+		if consumerListeningChannel, err = sc.setupPartitionConsumerChannel(topic, getOffset(kvArgs...)); err != nil {
+			logger.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name})
 			return nil, err
 		}
 	} else if sc.consumerType == GroupCustomer {
@@ -412,7 +412,7 @@
 		// does not consume from a precreated topic in some scenarios
 		//if sc.autoCreateTopic {
 		//	if err = sc.createTopic(topic, sc.numPartitions, sc.numReplicas); err != nil {
-		//		logger.Errorw(ctx, "create-topic-failure", logger.Fields{"error": err, "topic": topic.Name})
+		//		logger.Errorw("create-topic-failure", logger.Fields{"error": err, "topic": topic.Name})
 		//		return nil, err
 		//	}
 		//}
@@ -425,13 +425,13 @@
 			// Need to use a unique group Id per topic
 			groupId = sc.consumerGroupPrefix + topic.Name
 		}
-		if consumerListeningChannel, err = sc.setupGroupConsumerChannel(ctx, topic, groupId, getOffset(kvArgs...)); err != nil {
-			logger.Warnw(ctx, "create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+		if consumerListeningChannel, err = sc.setupGroupConsumerChannel(topic, groupId, getOffset(kvArgs...)); err != nil {
+			logger.Warnw("create-consumers-channel-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
 			return nil, err
 		}
 
 	} else {
-		logger.Warnw(ctx, "unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
+		logger.Warnw("unknown-consumer-type", log.Fields{"consumer-type": sc.consumerType})
 		return nil, errors.New("unknown-consumer-type")
 	}
 
@@ -439,37 +439,37 @@
 }
 
 //UnSubscribe unsubscribe a consumer from a given topic
-func (sc *SaramaClient) UnSubscribe(ctx context.Context, topic *Topic, ch <-chan *ic.InterContainerMessage) error {
+func (sc *SaramaClient) UnSubscribe(topic *Topic, ch <-chan *ic.InterContainerMessage) error {
 	sc.lockTopic(topic)
 	defer sc.unLockTopic(topic)
 
-	logger.Debugw(ctx, "unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
+	logger.Debugw("unsubscribing-channel-from-topic", log.Fields{"topic": topic.Name})
 	var err error
-	if err = sc.removeChannelFromConsumerChannelMap(ctx, *topic, ch); err != nil {
-		logger.Errorw(ctx, "failed-removing-channel", log.Fields{"error": err})
+	if err = sc.removeChannelFromConsumerChannelMap(*topic, ch); err != nil {
+		logger.Errorw("failed-removing-channel", log.Fields{"error": err})
 	}
-	if err = sc.deleteFromGroupConsumers(ctx, topic.Name); err != nil {
-		logger.Errorw(ctx, "failed-deleting-group-consumer", log.Fields{"error": err})
+	if err = sc.deleteFromGroupConsumers(topic.Name); err != nil {
+		logger.Errorw("failed-deleting-group-consumer", log.Fields{"error": err})
 	}
 	return err
 }
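
A consumer-side sketch of the reverted Subscribe/UnSubscribe pair. The caller, topic name and stop channel are hypothetical, and the inter-container protos import path is assumed to match the one this package already uses:

    package example

    import (
        "github.com/opencord/voltha-lib-go/v3/pkg/kafka"
        ic "github.com/opencord/voltha-protos/v3/go/inter_container"
    )

    // consumeUntilStopped subscribes to one topic and handles messages until
    // told to stop, at which point it unsubscribes (which also closes ch).
    func consumeUntilStopped(sc *kafka.SaramaClient, stop <-chan struct{}, handle func(*ic.InterContainerMessage)) error {
        topic := kafka.Topic{Name: "example-topic"}
        ch, err := sc.Subscribe(&topic)
        if err != nil {
            return err
        }
        for {
            select {
            case msg, ok := <-ch:
                if !ok {
                    return nil // client shut down and closed the channel
                }
                handle(msg)
            case <-stop:
                return sc.UnSubscribe(&topic, ch)
            }
        }
    }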
 
-func (sc *SaramaClient) SubscribeForMetadata(ctx context.Context, callback func(fromTopic string, timestamp time.Time)) {
+func (sc *SaramaClient) SubscribeForMetadata(callback func(fromTopic string, timestamp time.Time)) {
 	sc.metadataCallback = callback
 }
 
-func (sc *SaramaClient) updateLiveness(ctx context.Context, alive bool) {
+func (sc *SaramaClient) updateLiveness(alive bool) {
 	// Post a consistent stream of liveness data to the channel,
 	// so that in a live state, the core does not timeout and
 	// send a forced liveness message. Production of liveness
 	// events to the channel is rate-limited by livenessChannelInterval.
 	if sc.liveness != nil {
 		if sc.alive != alive {
-			logger.Info(ctx, "update-liveness-channel-because-change")
+			logger.Info("update-liveness-channel-because-change")
 			sc.liveness <- alive
 			sc.lastLivenessTime = time.Now()
 		} else if time.Since(sc.lastLivenessTime) > sc.livenessChannelInterval {
-			logger.Info(ctx, "update-liveness-channel-because-interval")
+			logger.Info("update-liveness-channel-because-interval")
 			sc.liveness <- alive
 			sc.lastLivenessTime = time.Now()
 		}
@@ -477,21 +477,21 @@
 
 	// Only emit a log message when the state changes
 	if sc.alive != alive {
-		logger.Info(ctx, "set-client-alive", log.Fields{"alive": alive})
+		logger.Info("set-client-alive", log.Fields{"alive": alive})
 		sc.alive = alive
 	}
 }
 
 // Once unhealthy, we never go back
-func (sc *SaramaClient) setUnhealthy(ctx context.Context) {
+func (sc *SaramaClient) setUnhealthy() {
 	sc.healthy = false
 	if sc.healthiness != nil {
-		logger.Infow(ctx, "set-client-unhealthy", log.Fields{"healthy": sc.healthy})
+		logger.Infow("set-client-unhealthy", log.Fields{"healthy": sc.healthy})
 		sc.healthiness <- sc.healthy
 	}
 }
 
-func (sc *SaramaClient) isLivenessError(ctx context.Context, err error) bool {
+func (sc *SaramaClient) isLivenessError(err error) bool {
 	// Sarama producers and consumers encapsulate the error inside
 	// a ProducerError or ConsumerError struct.
 	if prodError, ok := err.(*sarama.ProducerError); ok {
@@ -506,48 +506,48 @@
 
 	switch err.Error() {
 	case context.DeadlineExceeded.Error():
-		logger.Info(ctx, "is-liveness-error-timeout")
+		logger.Info("is-liveness-error-timeout")
 		return true
 	case sarama.ErrOutOfBrokers.Error(): // "Kafka: client has run out of available brokers"
-		logger.Info(ctx, "is-liveness-error-no-brokers")
+		logger.Info("is-liveness-error-no-brokers")
 		return true
 	case sarama.ErrShuttingDown.Error(): // "Kafka: message received by producer in process of shutting down"
-		logger.Info(ctx, "is-liveness-error-shutting-down")
+		logger.Info("is-liveness-error-shutting-down")
 		return true
 	case sarama.ErrControllerNotAvailable.Error(): // "Kafka: controller is not available"
-		logger.Info(ctx, "is-liveness-error-not-available")
+		logger.Info("is-liveness-error-not-available")
 		return true
 	case breaker.ErrBreakerOpen.Error(): // "circuit breaker is open"
-		logger.Info(ctx, "is-liveness-error-circuit-breaker-open")
+		logger.Info("is-liveness-error-circuit-breaker-open")
 		return true
 	}
 
 	if strings.HasSuffix(err.Error(), "connection refused") { // "dial tcp 10.244.1.176:9092: connect: connection refused"
-		logger.Info(ctx, "is-liveness-error-connection-refused")
+		logger.Info("is-liveness-error-connection-refused")
 		return true
 	}
 
 	if strings.HasSuffix(err.Error(), "i/o timeout") { // "dial tcp 10.244.1.176:9092: i/o timeout"
-		logger.Info(ctx, "is-liveness-error-io-timeout")
+		logger.Info("is-liveness-error-io-timeout")
 		return true
 	}
 
 	// Other errors shouldn't trigger a loss of liveness
 
-	logger.Infow(ctx, "is-liveness-error-ignored", log.Fields{"err": err})
+	logger.Infow("is-liveness-error-ignored", log.Fields{"err": err})
 
 	return false
 }
 
 // send formats and sends the request onto the kafka messaging bus.
-func (sc *SaramaClient) Send(ctx context.Context, msg interface{}, topic *Topic, keys ...string) error {
+func (sc *SaramaClient) Send(msg interface{}, topic *Topic, keys ...string) error {
 
 	// Assert message is a proto message
 	var protoMsg proto.Message
 	var ok bool
 	// ascertain the value interface type is a proto.Message
 	if protoMsg, ok = msg.(proto.Message); !ok {
-		logger.Warnw(ctx, "message-not-proto-message", log.Fields{"msg": msg})
+		logger.Warnw("message-not-proto-message", log.Fields{"msg": msg})
 		return fmt.Errorf("not-a-proto-msg-%s", msg)
 	}
 
@@ -555,7 +555,7 @@
 	var err error
 	//	Create the Sarama producer message
 	if marshalled, err = proto.Marshal(protoMsg); err != nil {
-		logger.Errorw(ctx, "marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
+		logger.Errorw("marshalling-failed", log.Fields{"msg": protoMsg, "error": err})
 		return err
 	}
 	key := ""
@@ -574,12 +574,12 @@
 	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
 	select {
 	case ok := <-sc.producer.Successes():
-		logger.Debugw(ctx, "message-sent", log.Fields{"status": ok.Topic})
-		sc.updateLiveness(ctx, true)
+		logger.Debugw("message-sent", log.Fields{"status": ok.Topic})
+		sc.updateLiveness(true)
 	case notOk := <-sc.producer.Errors():
-		logger.Debugw(ctx, "error-sending", log.Fields{"status": notOk})
-		if sc.isLivenessError(ctx, notOk) {
-			sc.updateLiveness(ctx, false)
+		logger.Debugw("error-sending", log.Fields{"status": notOk})
+		if sc.isLivenessError(notOk) {
+			sc.updateLiveness(false)
 		}
 		return notOk
 	}
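
On the producer side, the reverted Send keeps the (message, topic, optional key) shape. A hypothetical publish helper with a made-up topic name, keyed the same way the inter-container proxy keys its replies:

    package example

    import (
        "github.com/opencord/voltha-lib-go/v3/pkg/kafka"
        "github.com/opencord/voltha-protos/v3/go/voltha"
    )

    // publishDeviceEvent sends a proto message, using the device id as the
    // optional message key.
    func publishDeviceEvent(sc *kafka.SaramaClient, event *voltha.DeviceEvent, deviceID string) error {
        topic := kafka.Topic{Name: "voltha.events"}
        return sc.Send(event, &topic, deviceID)
    }
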
@@ -591,11 +591,11 @@
 // or not the channel is still live. This channel is then picked up
 // by the service (i.e. rw_core / ro_core) to update readiness status
 // and/or take other actions.
-func (sc *SaramaClient) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
-	logger.Infow(ctx, "kafka-enable-liveness-channel", log.Fields{"enable": enable})
+func (sc *SaramaClient) EnableLivenessChannel(enable bool) chan bool {
+	logger.Infow("kafka-enable-liveness-channel", log.Fields{"enable": enable})
 	if enable {
 		if sc.liveness == nil {
-			logger.Info(ctx, "kafka-create-liveness-channel")
+			logger.Info("kafka-create-liveness-channel")
 			// At least 1, so we can immediately post to it without blocking
 			// Setting a bigger number (10) allows the monitor to fall behind
 			// without blocking others. The monitor shouldn't really fall
@@ -615,11 +615,11 @@
 // Enable the Healthiness monitor channel. This channel will report "false"
 // if the kafka consumers die, or some other problem occurs which is
 // catastrophic that would require re-creating the client.
-func (sc *SaramaClient) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
-	logger.Infow(ctx, "kafka-enable-healthiness-channel", log.Fields{"enable": enable})
+func (sc *SaramaClient) EnableHealthinessChannel(enable bool) chan bool {
+	logger.Infow("kafka-enable-healthiness-channel", log.Fields{"enable": enable})
 	if enable {
 		if sc.healthiness == nil {
-			logger.Info(ctx, "kafka-create-healthiness-channel")
+			logger.Info("kafka-create-healthiness-channel")
 			// At least 1, so we can immediately post to it without blocking
 			// Setting a bigger number (10) allows the monitor to fall behind
 			// without blocking others. The monitor shouldn't really fall
@@ -638,7 +638,7 @@
 
 // send an empty message on the liveness channel to check whether connectivity has
 // been restored.
-func (sc *SaramaClient) SendLiveness(ctx context.Context) error {
+func (sc *SaramaClient) SendLiveness() error {
 	if !sc.started {
 		return fmt.Errorf("SendLiveness() called while not started")
 	}
@@ -654,12 +654,12 @@
 	// TODO: Use a lock or a different mechanism to ensure the response received corresponds to the message sent.
 	select {
 	case ok := <-sc.producer.Successes():
-		logger.Debugw(ctx, "liveness-message-sent", log.Fields{"status": ok.Topic})
-		sc.updateLiveness(ctx, true)
+		logger.Debugw("liveness-message-sent", log.Fields{"status": ok.Topic})
+		sc.updateLiveness(true)
 	case notOk := <-sc.producer.Errors():
-		logger.Debugw(ctx, "liveness-error-sending", log.Fields{"status": notOk})
-		if sc.isLivenessError(ctx, notOk) {
-			sc.updateLiveness(ctx, false)
+		logger.Debugw("liveness-error-sending", log.Fields{"status": notOk})
+		if sc.isLivenessError(notOk) {
+			sc.updateLiveness(false)
 		}
 		return notOk
 	}
@@ -686,7 +686,7 @@
 	return sarama.OffsetNewest
 }
 
-func (sc *SaramaClient) createClusterAdmin(ctx context.Context) error {
+func (sc *SaramaClient) createClusterAdmin() error {
 	config := sarama.NewConfig()
 	config.Version = sarama.V1_0_0_0
 
@@ -694,7 +694,7 @@
 	var cAdmin sarama.ClusterAdmin
 	var err error
 	if cAdmin, err = sarama.NewClusterAdmin([]string{sc.KafkaAddress}, config); err != nil {
-		logger.Errorw(ctx, "cluster-admin-failure", log.Fields{"error": err, "broker-address": sc.KafkaAddress})
+		logger.Errorw("cluster-admin-failure", log.Fields{"error": err, "broker-address": sc.KafkaAddress})
 		return err
 	}
 	sc.cAdmin = cAdmin
@@ -739,24 +739,24 @@
 	return nil
 }
 
-func (sc *SaramaClient) addChannelToConsumerChannelMap(ctx context.Context, topic *Topic, ch chan *ic.InterContainerMessage) {
+func (sc *SaramaClient) addChannelToConsumerChannelMap(topic *Topic, ch chan *ic.InterContainerMessage) {
 	sc.lockTopicToConsumerChannelMap.Lock()
 	defer sc.lockTopicToConsumerChannelMap.Unlock()
 	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
 		consumerCh.channels = append(consumerCh.channels, ch)
 		return
 	}
-	logger.Warnw(ctx, "consumers-channel-not-exist", log.Fields{"topic": topic.Name})
+	logger.Warnw("consumers-channel-not-exist", log.Fields{"topic": topic.Name})
 }
 
 //closeConsumers closes a list of sarama consumers.  The consumers can either be a partition consumers or a group consumers
-func closeConsumers(ctx context.Context, consumers []interface{}) error {
+func closeConsumers(consumers []interface{}) error {
 	var err error
 	for _, consumer := range consumers {
 		//	Is it a partition consumers?
 		if partionConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
 			if errTemp := partionConsumer.Close(); errTemp != nil {
-				logger.Debugw(ctx, "partition!!!", log.Fields{"err": errTemp})
+				logger.Debugw("partition!!!", log.Fields{"err": errTemp})
 				if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
 					// This can occur on race condition
 					err = nil
@@ -778,35 +778,35 @@
 	return err
 }
 
-func (sc *SaramaClient) removeChannelFromConsumerChannelMap(ctx context.Context, topic Topic, ch <-chan *ic.InterContainerMessage) error {
+func (sc *SaramaClient) removeChannelFromConsumerChannelMap(topic Topic, ch <-chan *ic.InterContainerMessage) error {
 	sc.lockTopicToConsumerChannelMap.Lock()
 	defer sc.lockTopicToConsumerChannelMap.Unlock()
 	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
 		// Channel will be closed in the removeChannel method
-		consumerCh.channels = removeChannel(ctx, consumerCh.channels, ch)
+		consumerCh.channels = removeChannel(consumerCh.channels, ch)
 		// If there are no more channels then we can close the consumers itself
 		if len(consumerCh.channels) == 0 {
-			logger.Debugw(ctx, "closing-consumers", log.Fields{"topic": topic})
-			err := closeConsumers(ctx, consumerCh.consumers)
+			logger.Debugw("closing-consumers", log.Fields{"topic": topic})
+			err := closeConsumers(consumerCh.consumers)
 			//err := consumerCh.consumers.Close()
 			delete(sc.topicToConsumerChannelMap, topic.Name)
 			return err
 		}
 		return nil
 	}
-	logger.Warnw(ctx, "topic-does-not-exist", log.Fields{"topic": topic.Name})
+	logger.Warnw("topic-does-not-exist", log.Fields{"topic": topic.Name})
 	return errors.New("topic-does-not-exist")
 }
 
-func (sc *SaramaClient) clearTopicFromConsumerChannelMap(ctx context.Context, topic Topic) error {
+func (sc *SaramaClient) clearTopicFromConsumerChannelMap(topic Topic) error {
 	sc.lockTopicToConsumerChannelMap.Lock()
 	defer sc.lockTopicToConsumerChannelMap.Unlock()
 	if consumerCh, exist := sc.topicToConsumerChannelMap[topic.Name]; exist {
 		for _, ch := range consumerCh.channels {
 			// Channel will be closed in the removeChannel method
-			removeChannel(ctx, consumerCh.channels, ch)
+			removeChannel(consumerCh.channels, ch)
 		}
-		err := closeConsumers(ctx, consumerCh.consumers)
+		err := closeConsumers(consumerCh.consumers)
 		//if err == sarama.ErrUnknownTopicOrPartition {
 		//	// Not an error
 		//	err = nil
@@ -815,12 +815,12 @@
 		delete(sc.topicToConsumerChannelMap, topic.Name)
 		return err
 	}
-	logger.Debugw(ctx, "topic-does-not-exist", log.Fields{"topic": topic.Name})
+	logger.Debugw("topic-does-not-exist", log.Fields{"topic": topic.Name})
 	return nil
 }
 
 //createPublisher creates the publisher which is used to send a message onto kafka
-func (sc *SaramaClient) createPublisher(ctx context.Context) error {
+func (sc *SaramaClient) createPublisher() error {
 	// This Creates the publisher
 	config := sarama.NewConfig()
 	config.Producer.Partitioner = sarama.NewRandomPartitioner
@@ -835,16 +835,16 @@
 	brokers := []string{sc.KafkaAddress}
 
 	if producer, err := sarama.NewAsyncProducer(brokers, config); err != nil {
-		logger.Errorw(ctx, "error-starting-publisher", log.Fields{"error": err})
+		logger.Errorw("error-starting-publisher", log.Fields{"error": err})
 		return err
 	} else {
 		sc.producer = producer
 	}
-	logger.Info(ctx, "Kafka-publisher-created")
+	logger.Info("Kafka-publisher-created")
 	return nil
 }
 
-func (sc *SaramaClient) createConsumer(ctx context.Context) error {
+func (sc *SaramaClient) createConsumer() error {
 	config := sarama.NewConfig()
 	config.Consumer.Return.Errors = true
 	config.Consumer.Fetch.Min = 1
@@ -855,17 +855,17 @@
 	brokers := []string{sc.KafkaAddress}
 
 	if consumer, err := sarama.NewConsumer(brokers, config); err != nil {
-		logger.Errorw(ctx, "error-starting-consumers", log.Fields{"error": err})
+		logger.Errorw("error-starting-consumers", log.Fields{"error": err})
 		return err
 	} else {
 		sc.consumer = consumer
 	}
-	logger.Info(ctx, "Kafka-consumers-created")
+	logger.Info("Kafka-consumers-created")
 	return nil
 }
 
 // createGroupConsumer creates a consumers group
-func (sc *SaramaClient) createGroupConsumer(ctx context.Context, topic *Topic, groupId string, initialOffset int64, retries int) (*scc.Consumer, error) {
+func (sc *SaramaClient) createGroupConsumer(topic *Topic, groupId string, initialOffset int64, retries int) (*scc.Consumer, error) {
 	config := scc.NewConfig()
 	config.ClientID = uuid.New().String()
 	config.Group.Mode = scc.ConsumerModeMultiplex
@@ -883,10 +883,10 @@
 	var err error
 
 	if consumer, err = scc.NewConsumer(brokers, groupId, topics, config); err != nil {
-		logger.Errorw(ctx, "create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+		logger.Errorw("create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
 		return nil, err
 	}
-	logger.Debugw(ctx, "create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
+	logger.Debugw("create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
 
 	//sc.groupConsumers[topic.Name] = consumer
 	sc.addToGroupConsumers(topic.Name, consumer)
@@ -911,104 +911,104 @@
 	}
 }
 
-func (sc *SaramaClient) consumeFromAPartition(ctx context.Context, topic *Topic, consumer sarama.PartitionConsumer, consumerChnls *consumerChannels) {
-	logger.Debugw(ctx, "starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
+func (sc *SaramaClient) consumeFromAPartition(topic *Topic, consumer sarama.PartitionConsumer, consumerChnls *consumerChannels) {
+	logger.Debugw("starting-partition-consumption-loop", log.Fields{"topic": topic.Name})
 startloop:
 	for {
 		select {
 		case err, ok := <-consumer.Errors():
 			if ok {
-				if sc.isLivenessError(ctx, err) {
-					sc.updateLiveness(ctx, false)
-					logger.Warnw(ctx, "partition-consumers-error", log.Fields{"error": err})
+				if sc.isLivenessError(err) {
+					sc.updateLiveness(false)
+					logger.Warnw("partition-consumers-error", log.Fields{"error": err})
 				}
 			} else {
 				// Channel is closed
 				break startloop
 			}
 		case msg, ok := <-consumer.Messages():
-			//logger.Debugw(ctx, "message-received", logger.Fields{"msg": msg, "receivedTopic": msg.Topic})
+			//logger.Debugw("message-received", logger.Fields{"msg": msg, "receivedTopic": msg.Topic})
 			if !ok {
 				// channel is closed
 				break startloop
 			}
 			msgBody := msg.Value
-			sc.updateLiveness(ctx, true)
-			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			sc.updateLiveness(true)
+			logger.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
 			icm := &ic.InterContainerMessage{}
 			if err := proto.Unmarshal(msgBody, icm); err != nil {
-				logger.Warnw(ctx, "partition-invalid-message", log.Fields{"error": err})
+				logger.Warnw("partition-invalid-message", log.Fields{"error": err})
 				continue
 			}
 			go sc.dispatchToConsumers(consumerChnls, icm)
 		case <-sc.doneCh:
-			logger.Infow(ctx, "partition-received-exit-signal", log.Fields{"topic": topic.Name})
+			logger.Infow("partition-received-exit-signal", log.Fields{"topic": topic.Name})
 			break startloop
 		}
 	}
-	logger.Infow(ctx, "partition-consumer-stopped", log.Fields{"topic": topic.Name})
-	sc.setUnhealthy(ctx)
+	logger.Infow("partition-consumer-stopped", log.Fields{"topic": topic.Name})
+	sc.setUnhealthy()
 }
 
-func (sc *SaramaClient) consumeGroupMessages(ctx context.Context, topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
-	logger.Debugw(ctx, "starting-group-consumption-loop", log.Fields{"topic": topic.Name})
+func (sc *SaramaClient) consumeGroupMessages(topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
+	logger.Debugw("starting-group-consumption-loop", log.Fields{"topic": topic.Name})
 
 startloop:
 	for {
 		select {
 		case err, ok := <-consumer.Errors():
 			if ok {
-				if sc.isLivenessError(ctx, err) {
-					sc.updateLiveness(ctx, false)
+				if sc.isLivenessError(err) {
+					sc.updateLiveness(false)
 				}
-				logger.Warnw(ctx, "group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
+				logger.Warnw("group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
 			} else {
-				logger.Warnw(ctx, "group-consumers-closed-err", log.Fields{"topic": topic.Name})
+				logger.Warnw("group-consumers-closed-err", log.Fields{"topic": topic.Name})
 				// channel is closed
 				break startloop
 			}
 		case msg, ok := <-consumer.Messages():
 			if !ok {
-				logger.Warnw(ctx, "group-consumers-closed-msg", log.Fields{"topic": topic.Name})
+				logger.Warnw("group-consumers-closed-msg", log.Fields{"topic": topic.Name})
 				// Channel closed
 				break startloop
 			}
-			sc.updateLiveness(ctx, true)
-			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+			sc.updateLiveness(true)
+			logger.Debugw("message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
 			msgBody := msg.Value
 			icm := &ic.InterContainerMessage{}
 			if err := proto.Unmarshal(msgBody, icm); err != nil {
-				logger.Warnw(ctx, "invalid-message", log.Fields{"error": err})
+				logger.Warnw("invalid-message", log.Fields{"error": err})
 				continue
 			}
 			go sc.dispatchToConsumers(consumerChnls, icm)
 			consumer.MarkOffset(msg, "")
 		case ntf := <-consumer.Notifications():
-			logger.Debugw(ctx, "group-received-notification", log.Fields{"notification": ntf})
+			logger.Debugw("group-received-notification", log.Fields{"notification": ntf})
 		case <-sc.doneCh:
-			logger.Infow(ctx, "group-received-exit-signal", log.Fields{"topic": topic.Name})
+			logger.Infow("group-received-exit-signal", log.Fields{"topic": topic.Name})
 			break startloop
 		}
 	}
-	logger.Infow(ctx, "group-consumer-stopped", log.Fields{"topic": topic.Name})
-	sc.setUnhealthy(ctx)
+	logger.Infow("group-consumer-stopped", log.Fields{"topic": topic.Name})
+	sc.setUnhealthy()
 }
 
-func (sc *SaramaClient) startConsumers(ctx context.Context, topic *Topic) error {
-	logger.Debugw(ctx, "starting-consumers", log.Fields{"topic": topic.Name})
+func (sc *SaramaClient) startConsumers(topic *Topic) error {
+	logger.Debugw("starting-consumers", log.Fields{"topic": topic.Name})
 	var consumerCh *consumerChannels
 	if consumerCh = sc.getConsumerChannel(topic); consumerCh == nil {
-		logger.Errorw(ctx, "consumers-not-exist", log.Fields{"topic": topic.Name})
+		logger.Errorw("consumers-not-exist", log.Fields{"topic": topic.Name})
 		return errors.New("consumers-not-exist")
 	}
 	// For each consumer listening for that topic, start a consumption loop
 	for _, consumer := range consumerCh.consumers {
 		if pConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
-			go sc.consumeFromAPartition(ctx, topic, pConsumer, consumerCh)
+			go sc.consumeFromAPartition(topic, pConsumer, consumerCh)
 		} else if gConsumer, ok := consumer.(*scc.Consumer); ok {
-			go sc.consumeGroupMessages(ctx, topic, gConsumer, consumerCh)
+			go sc.consumeGroupMessages(topic, gConsumer, consumerCh)
 		} else {
-			logger.Errorw(ctx, "invalid-consumer", log.Fields{"topic": topic})
+			logger.Errorw("invalid-consumer", log.Fields{"topic": topic})
 			return errors.New("invalid-consumer")
 		}
 	}
@@ -1017,12 +1017,12 @@
 
 //// setupConsumerChannel creates a consumerChannels object for that topic and add it to the consumerChannels map
 //// for that topic.  It also starts the routine that listens for messages on that topic.
-func (sc *SaramaClient) setupPartitionConsumerChannel(ctx context.Context, topic *Topic, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+func (sc *SaramaClient) setupPartitionConsumerChannel(topic *Topic, initialOffset int64) (chan *ic.InterContainerMessage, error) {
 	var pConsumers []sarama.PartitionConsumer
 	var err error
 
-	if pConsumers, err = sc.createPartitionConsumers(ctx, topic, initialOffset); err != nil {
-		logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+	if pConsumers, err = sc.createPartitionConsumers(topic, initialOffset); err != nil {
+		logger.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
 
@@ -1044,8 +1044,8 @@
 
 	//Start a consumers to listen on that specific topic
 	go func() {
-		if err := sc.startConsumers(ctx, topic); err != nil {
-			logger.Errorw(ctx, "start-consumers-failed", log.Fields{
+		if err := sc.startConsumers(topic); err != nil {
+			logger.Errorw("start-consumers-failed", log.Fields{
 				"topic": topic,
 				"error": err})
 		}
@@ -1056,12 +1056,12 @@
 
 // setupConsumerChannel creates a consumerChannels object for that topic and add it to the consumerChannels map
 // for that topic.  It also starts the routine that listens for messages on that topic.
-func (sc *SaramaClient) setupGroupConsumerChannel(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (chan *ic.InterContainerMessage, error) {
+func (sc *SaramaClient) setupGroupConsumerChannel(topic *Topic, groupId string, initialOffset int64) (chan *ic.InterContainerMessage, error) {
 	// TODO:  Replace this development partition consumers with a group consumers
 	var pConsumer *scc.Consumer
 	var err error
-	if pConsumer, err = sc.createGroupConsumer(ctx, topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
-		logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+	if pConsumer, err = sc.createGroupConsumer(topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
+		logger.Errorw("creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
 	// Create the consumers/channel structure and set the consumers and create a channel on that topic - for now
@@ -1077,8 +1077,8 @@
 
 	//Start a consumers to listen on that specific topic
 	go func() {
-		if err := sc.startConsumers(ctx, topic); err != nil {
-			logger.Errorw(ctx, "start-consumers-failed", log.Fields{
+		if err := sc.startConsumers(topic); err != nil {
+			logger.Errorw("start-consumers-failed", log.Fields{
 				"topic": topic,
 				"error": err})
 		}
@@ -1087,11 +1087,11 @@
 	return consumerListeningChannel, nil
 }
 
-func (sc *SaramaClient) createPartitionConsumers(ctx context.Context, topic *Topic, initialOffset int64) ([]sarama.PartitionConsumer, error) {
-	logger.Debugw(ctx, "creating-partition-consumers", log.Fields{"topic": topic.Name})
+func (sc *SaramaClient) createPartitionConsumers(topic *Topic, initialOffset int64) ([]sarama.PartitionConsumer, error) {
+	logger.Debugw("creating-partition-consumers", log.Fields{"topic": topic.Name})
 	partitionList, err := sc.consumer.Partitions(topic.Name)
 	if err != nil {
-		logger.Warnw(ctx, "get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+		logger.Warnw("get-partition-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
 
@@ -1099,7 +1099,7 @@
 	for _, partition := range partitionList {
 		var pConsumer sarama.PartitionConsumer
 		if pConsumer, err = sc.consumer.ConsumePartition(topic.Name, partition, initialOffset); err != nil {
-			logger.Warnw(ctx, "consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
+			logger.Warnw("consumers-partition-failure", log.Fields{"error": err, "topic": topic.Name})
 			return nil, err
 		}
 		pConsumers = append(pConsumers, pConsumer)
@@ -1107,14 +1107,14 @@
 	return pConsumers, nil
 }
 
-func removeChannel(ctx context.Context, channels []chan *ic.InterContainerMessage, ch <-chan *ic.InterContainerMessage) []chan *ic.InterContainerMessage {
+func removeChannel(channels []chan *ic.InterContainerMessage, ch <-chan *ic.InterContainerMessage) []chan *ic.InterContainerMessage {
 	var i int
 	var channel chan *ic.InterContainerMessage
 	for i, channel = range channels {
 		if channel == ch {
 			channels[len(channels)-1], channels[i] = channels[i], channels[len(channels)-1]
 			close(channel)
-			logger.Debug(ctx, "channel-closed")
+			logger.Debug("channel-closed")
 			return channels[:len(channels)-1]
 		}
 	}
@@ -1129,14 +1129,14 @@
 	}
 }
 
-func (sc *SaramaClient) deleteFromGroupConsumers(ctx context.Context, topic string) error {
+func (sc *SaramaClient) deleteFromGroupConsumers(topic string) error {
 	sc.lockOfGroupConsumers.Lock()
 	defer sc.lockOfGroupConsumers.Unlock()
 	if _, exist := sc.groupConsumers[topic]; exist {
 		consumer := sc.groupConsumers[topic]
 		delete(sc.groupConsumers, topic)
 		if err := consumer.Close(); err != nil {
-			logger.Errorw(ctx, "failure-closing-consumer", log.Fields{"error": err})
+			logger.Errorw("failure-closing-consumer", log.Fields{"error": err})
 			return err
 		}
 	}
diff --git a/pkg/kafka/sarama_client_test.go b/pkg/kafka/sarama_client_test.go
index cbbfe7e..6dd9fd8 100644
--- a/pkg/kafka/sarama_client_test.go
+++ b/pkg/kafka/sarama_client_test.go
@@ -16,7 +16,6 @@
 package kafka
 
 import (
-	"context"
 	"github.com/stretchr/testify/assert"
 	"testing"
 )
@@ -25,7 +24,7 @@
 	// Note: This doesn't actually start the client
 	client := NewSaramaClient()
 
-	ch := client.EnableLivenessChannel(context.Background(), true)
+	ch := client.EnableLivenessChannel(true)
 
 	// The channel should have one "true" message on it
 	assert.NotEmpty(t, ch)
diff --git a/pkg/mocks/etcd/common.go b/pkg/mocks/etcd/common.go
index 63d4ab0..a45b4b2 100644
--- a/pkg/mocks/etcd/common.go
+++ b/pkg/mocks/etcd/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.CLogger
+var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mocks"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mocks"})
 	if err != nil {
 		panic(err)
 	}
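
The hunk above restores the pre-context logger pattern used throughout these packages: a plain log.Logger registered once via log.AddPackage so its level can be changed at run time. A minimal sketch of the same pattern in a consumer package; the package name "mypkg" and the report helper are illustrative, not part of the library:

    package mypkg

    import (
        "github.com/opencord/voltha-lib-go/v3/pkg/log"
    )

    // Package-level logger whose level can be adjusted at run time.
    var logger log.Logger

    func init() {
        var err error
        // JSON output, error level by default; the "pkg" field identifies this package.
        logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mypkg"})
        if err != nil {
            panic(err)
        }
    }

    // report shows the restored call shape: structured fields, no context argument.
    func report(topic string) {
        logger.Debugw("subscribed-to-topic", log.Fields{"topic": topic})
    }
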
diff --git a/pkg/mocks/etcd/etcd_server.go b/pkg/mocks/etcd/etcd_server.go
index 6113b3a..b4e201d 100644
--- a/pkg/mocks/etcd/etcd_server.go
+++ b/pkg/mocks/etcd/etcd_server.go
@@ -16,7 +16,6 @@
 package etcd
 
 import (
-	"context"
 	"fmt"
 	"go.etcd.io/etcd/embed"
 	"net/url"
@@ -52,25 +51,25 @@
 * :param localPersistentStorageDir: The name of a local directory which will hold the Etcd server data
 * :param logLevel: One of debug, info, warn, error, panic, or fatal. Default 'info'.
  */
-func MKConfig(ctx context.Context, configName string, clientPort, peerPort int, localPersistentStorageDir string, logLevel string) *embed.Config {
+func MKConfig(configName string, clientPort, peerPort int, localPersistentStorageDir string, logLevel string) *embed.Config {
 	cfg := embed.NewConfig()
 	cfg.Name = configName
 	cfg.Dir = localPersistentStorageDir
 	cfg.Logger = "zap"
 	if !islogLevelValid(logLevel) {
-		logger.Fatalf(ctx, "Invalid log level -%s", logLevel)
+		logger.Fatalf("Invalid log level -%s", logLevel)
 	}
 	cfg.LogLevel = logLevel
 	acurl, err := url.Parse(fmt.Sprintf("http://localhost:%d", clientPort))
 	if err != nil {
-		logger.Fatalf(ctx, "Invalid client port -%d", clientPort)
+		logger.Fatalf("Invalid client port -%d", clientPort)
 	}
 	cfg.ACUrls = []url.URL{*acurl}
 	cfg.LCUrls = []url.URL{*acurl}
 
 	apurl, err := url.Parse(fmt.Sprintf("http://localhost:%d", peerPort))
 	if err != nil {
-		logger.Fatalf(ctx, "Invalid peer port -%d", peerPort)
+		logger.Fatalf("Invalid peer port -%d", peerPort)
 	}
 	cfg.LPUrls = []url.URL{*apurl}
 	cfg.APUrls = []url.URL{*apurl}
@@ -92,7 +91,7 @@
 
 //StartEtcdServer creates and starts an embedded Etcd server.  A local directory to store data is created for the
 //embedded server lifetime (for the duration of a unit test.  The server runs at localhost:2379.
-func StartEtcdServer(ctx context.Context, cfg *embed.Config) *EtcdServer {
+func StartEtcdServer(cfg *embed.Config) *EtcdServer {
 	// If the server is already running, just return
 	if cfg == nil {
 		cfg = getDefaultCfg()
@@ -100,35 +99,35 @@
 	// Remove the local directory as
 	// a safeguard for the case where a prior test failed
 	if err := os.RemoveAll(cfg.Dir); err != nil {
-		logger.Fatalf(ctx, "Failure removing local directory %s", cfg.Dir)
+		logger.Fatalf("Failure removing local directory %s", cfg.Dir)
 	}
 	e, err := embed.StartEtcd(cfg)
 	if err != nil {
-		logger.Fatal(ctx, err)
+		logger.Fatal(err)
 	}
 	select {
 	case <-e.Server.ReadyNotify():
-		logger.Debug(ctx, "Embedded Etcd server is ready!")
+		logger.Debug("Embedded Etcd server is ready!")
 	case <-time.After(serverStartUpTimeout):
 		e.Server.HardStop() // trigger a shutdown
 		e.Close()
-		logger.Fatal(ctx, "Embedded Etcd server took too long to start!")
+		logger.Fatal("Embedded Etcd server took too long to start!")
 	case err := <-e.Err():
 		e.Server.HardStop() // trigger a shutdown
 		e.Close()
-		logger.Fatalf(ctx, "Embedded Etcd server errored out - %s", err)
+		logger.Fatalf("Embedded Etcd server errored out - %s", err)
 	}
 	return &EtcdServer{server: e}
 }
 
 //Stop closes the embedded Etcd server and removes the local data directory as well
-func (es *EtcdServer) Stop(ctx context.Context) {
+func (es *EtcdServer) Stop() {
 	if es != nil {
 		storage := es.server.Config().Dir
 		es.server.Server.HardStop()
 		es.server.Close()
 		if err := os.RemoveAll(storage); err != nil {
-			logger.Fatalf(ctx, "Failure removing local directory %s", es.server.Config().Dir)
+			logger.Fatalf("Failure removing local directory %s", es.server.Config().Dir)
 		}
 	}
 }
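
The embedded server above exists only to back unit tests. A minimal sketch of starting it with the restored context-free signatures; passing nil makes StartEtcdServer fall back to the package defaults (per the comment above, the default server runs at localhost:2379), and the test name is illustrative:

    package example

    import (
        "testing"

        mocketcd "github.com/opencord/voltha-lib-go/v3/pkg/mocks/etcd"
    )

    func TestWithEmbeddedEtcd(t *testing.T) {
        // A nil config makes StartEtcdServer build its default configuration.
        server := mocketcd.StartEtcdServer(nil)
        if server == nil {
            t.Fatal("embedded etcd server did not start")
        }
        // Stop also removes the server's local data directory.
        defer server.Stop()

        // ... exercise code that expects an etcd endpoint on localhost:2379 ...
    }
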
diff --git a/pkg/mocks/etcd/etcd_server_test.go b/pkg/mocks/etcd/etcd_server_test.go
index b26b262..43c7a42 100644
--- a/pkg/mocks/etcd/etcd_server_test.go
+++ b/pkg/mocks/etcd/etcd_server_test.go
@@ -32,24 +32,23 @@
 var client *kvstore.EtcdClient
 
 func setup() {
-	ctx := context.Background()
 	clientPort, err := freeport.GetFreePort()
 	if err != nil {
-		logger.Fatal(ctx, err)
+		logger.Fatal(err)
 	}
 	peerPort, err := freeport.GetFreePort()
 	if err != nil {
-		logger.Fatal(ctx, err)
+		logger.Fatal(err)
 	}
-	etcdServer = StartEtcdServer(ctx, MKConfig(ctx, "voltha.mock.test", clientPort, peerPort, "voltha.lib.mocks.etcd", "error"))
+	etcdServer = StartEtcdServer(MKConfig("voltha.mock.test", clientPort, peerPort, "voltha.lib.mocks.etcd", "error"))
 	if etcdServer == nil {
-		logger.Fatal(ctx, "Embedded server failed to start")
+		logger.Fatal("Embedded server failed to start")
 	}
 	clientAddr := fmt.Sprintf("localhost:%d", clientPort)
-	client, err = kvstore.NewEtcdClient(ctx, clientAddr, 10*time.Second, log.WarnLevel)
+	client, err = kvstore.NewEtcdClient(clientAddr, 10*time.Second, log.WarnLevel)
 	if err != nil || client == nil {
-		etcdServer.Stop(ctx)
-		logger.Fatal(ctx, "Failed to create an Etcd client")
+		etcdServer.Stop()
+		logger.Fatal("Failed to create an Etcd client")
 	}
 }
 
@@ -78,10 +77,10 @@
 
 func shutdown() {
 	if client != nil {
-		client.Close(context.Background())
+		client.Close()
 	}
 	if etcdServer != nil {
-		etcdServer.Stop(context.Background())
+		etcdServer.Stop()
 	}
 }
 
diff --git a/pkg/mocks/kafka/common.go b/pkg/mocks/kafka/common.go
index e980b05..05bc5f9 100644
--- a/pkg/mocks/kafka/common.go
+++ b/pkg/mocks/kafka/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.CLogger
+var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mocks"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "mocks"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/mocks/kafka/endpoint_manager.go b/pkg/mocks/kafka/endpoint_manager.go
index 8b8e7f6..fedbebf 100644
--- a/pkg/mocks/kafka/endpoint_manager.go
+++ b/pkg/mocks/kafka/endpoint_manager.go
@@ -17,7 +17,6 @@
 package kafka
 
 import (
-	"context"
 	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
 )
 
@@ -28,16 +27,16 @@
 	return mock
 }
 
-func (em *EndpointManager) GetEndpoint(ctx context.Context, deviceID string, serviceType string) (kafka.Endpoint, error) {
+func (em *EndpointManager) GetEndpoint(deviceID string, serviceType string) (kafka.Endpoint, error) {
 	// TODO add mocks call and args
 	return kafka.Endpoint(serviceType), nil
 }
 
-func (em *EndpointManager) IsDeviceOwnedByService(ctx context.Context, deviceID string, serviceType string, replicaNumber int32) (bool, error) {
+func (em *EndpointManager) IsDeviceOwnedByService(deviceID string, serviceType string, replicaNumber int32) (bool, error) {
 	// TODO add mocks call and args
 	return true, nil
 }
 
-func (em *EndpointManager) GetReplicaAssignment(ctx context.Context, deviceID string, serviceType string) (kafka.ReplicaID, error) {
+func (em *EndpointManager) GetReplicaAssignment(deviceID string, serviceType string) (kafka.ReplicaID, error) {
 	return kafka.ReplicaID(1), nil
 }
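
The mock endpoint manager is stateless and simply echoes the service type back as the endpoint, which is enough for routing-free unit tests. A rough sketch of using it through the restored context-free methods; it assumes the zero value of the struct is usable (the constructor is not visible in this hunk):

    package example

    import (
        "testing"

        mocks "github.com/opencord/voltha-lib-go/v3/pkg/mocks/kafka"
    )

    func TestMockEndpointManager(t *testing.T) {
        // Assumed usable as a zero value; the visible methods keep no state.
        em := &mocks.EndpointManager{}

        // Restored signature: no context argument.
        ep, err := em.GetEndpoint("device-1", "adapter-service")
        if err != nil {
            t.Fatal(err)
        }
        // The mock returns the service type converted to a kafka.Endpoint.
        if string(ep) != "adapter-service" {
            t.Fatalf("unexpected endpoint %q", ep)
        }
    }
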
diff --git a/pkg/mocks/kafka/kafka_client.go b/pkg/mocks/kafka/kafka_client.go
index 9d6f50c..7c5508b 100644
--- a/pkg/mocks/kafka/kafka_client.go
+++ b/pkg/mocks/kafka/kafka_client.go
@@ -16,7 +16,6 @@
 package kafka
 
 import (
-	"context"
 	"fmt"
 	"sync"
 	"time"
@@ -43,12 +42,12 @@
 	}
 }
 
-func (kc *KafkaClient) Start(ctx context.Context) error {
-	logger.Debug(ctx, "kafka-client-started")
+func (kc *KafkaClient) Start() error {
+	logger.Debug("kafka-client-started")
 	return nil
 }
 
-func (kc *KafkaClient) Stop(ctx context.Context) {
+func (kc *KafkaClient) Stop() {
 	kc.lock.Lock()
 	defer kc.lock.Unlock()
 	for topic, chnls := range kc.topicsChannelMap {
@@ -57,11 +56,11 @@
 		}
 		delete(kc.topicsChannelMap, topic)
 	}
-	logger.Debug(ctx, "kafka-client-stopped")
+	logger.Debug("kafka-client-stopped")
 }
 
-func (kc *KafkaClient) CreateTopic(ctx context.Context, topic *kafka.Topic, numPartition int, repFactor int) error {
-	logger.Debugw(ctx, "CreatingTopic", log.Fields{"topic": topic.Name, "numPartition": numPartition, "replicationFactor": repFactor})
+func (kc *KafkaClient) CreateTopic(topic *kafka.Topic, numPartition int, repFactor int) error {
+	logger.Debugw("CreatingTopic", log.Fields{"topic": topic.Name, "numPartition": numPartition, "replicationFactor": repFactor})
 	kc.lock.Lock()
 	defer kc.lock.Unlock()
 	if _, ok := kc.topicsChannelMap[topic.Name]; ok {
@@ -72,16 +71,16 @@
 	return nil
 }
 
-func (kc *KafkaClient) DeleteTopic(ctx context.Context, topic *kafka.Topic) error {
-	logger.Debugw(ctx, "DeleteTopic", log.Fields{"topic": topic.Name})
+func (kc *KafkaClient) DeleteTopic(topic *kafka.Topic) error {
+	logger.Debugw("DeleteTopic", log.Fields{"topic": topic.Name})
 	kc.lock.Lock()
 	defer kc.lock.Unlock()
 	delete(kc.topicsChannelMap, topic.Name)
 	return nil
 }
 
-func (kc *KafkaClient) Subscribe(ctx context.Context, topic *kafka.Topic, kvArgs ...*kafka.KVArg) (<-chan *ic.InterContainerMessage, error) {
-	logger.Debugw(ctx, "Subscribe", log.Fields{"topic": topic.Name, "args": kvArgs})
+func (kc *KafkaClient) Subscribe(topic *kafka.Topic, kvArgs ...*kafka.KVArg) (<-chan *ic.InterContainerMessage, error) {
+	logger.Debugw("Subscribe", log.Fields{"topic": topic.Name, "args": kvArgs})
 	kc.lock.Lock()
 	defer kc.lock.Unlock()
 	ch := make(chan *ic.InterContainerMessage)
@@ -94,8 +93,8 @@
 	return s[:len(s)-1]
 }
 
-func (kc *KafkaClient) UnSubscribe(ctx context.Context, topic *kafka.Topic, ch <-chan *ic.InterContainerMessage) error {
-	logger.Debugw(ctx, "UnSubscribe", log.Fields{"topic": topic.Name})
+func (kc *KafkaClient) UnSubscribe(topic *kafka.Topic, ch <-chan *ic.InterContainerMessage) error {
+	logger.Debugw("UnSubscribe", log.Fields{"topic": topic.Name})
 	kc.lock.Lock()
 	defer kc.lock.Unlock()
 	if chnls, ok := kc.topicsChannelMap[topic.Name]; ok {
@@ -113,11 +112,11 @@
 	return nil
 }
 
-func (kc *KafkaClient) SubscribeForMetadata(ctx context.Context, _ func(fromTopic string, timestamp time.Time)) {
-	logger.Debug(ctx, "SubscribeForMetadata - unimplemented")
+func (kc *KafkaClient) SubscribeForMetadata(_ func(fromTopic string, timestamp time.Time)) {
+	logger.Debug("SubscribeForMetadata - unimplemented")
 }
 
-func (kc *KafkaClient) Send(ctx context.Context, msg interface{}, topic *kafka.Topic, keys ...string) error {
+func (kc *KafkaClient) Send(msg interface{}, topic *kafka.Topic, keys ...string) error {
 	req, ok := msg.(*ic.InterContainerMessage)
 	if !ok {
 		return status.Error(codes.InvalidArgument, "msg-not-InterContainerMessage-type")
@@ -128,22 +127,22 @@
 	kc.lock.RLock()
 	defer kc.lock.RUnlock()
 	for _, ch := range kc.topicsChannelMap[topic.Name] {
-		logger.Debugw(ctx, "Publishing", log.Fields{"fromTopic": req.Header.FromTopic, "toTopic": topic.Name, "id": req.Header.Id})
+		logger.Debugw("Publishing", log.Fields{"fromTopic": req.Header.FromTopic, "toTopic": topic.Name, "id": req.Header.Id})
 		ch <- req
 	}
 	return nil
 }
 
-func (kc *KafkaClient) SendLiveness(ctx context.Context) error {
+func (kc *KafkaClient) SendLiveness() error {
 	return status.Error(codes.Unimplemented, "SendLiveness")
 }
 
-func (kc *KafkaClient) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
-	logger.Debug(ctx, "EnableLivenessChannel - unimplemented")
+func (kc *KafkaClient) EnableLivenessChannel(enable bool) chan bool {
+	logger.Debug("EnableLivenessChannel - unimplemented")
 	return nil
 }
 
-func (kc *KafkaClient) EnableHealthinessChannel(ctx context.Context, enable bool) chan bool {
-	logger.Debug(ctx, "EnableHealthinessChannel - unimplemented")
+func (kc *KafkaClient) EnableHealthinessChannel(enable bool) chan bool {
+	logger.Debug("EnableHealthinessChannel - unimplemented")
 	return nil
 }
diff --git a/pkg/mocks/kafka/kafka_client_test.go b/pkg/mocks/kafka/kafka_client_test.go
index 7753d66..0e35ec1 100644
--- a/pkg/mocks/kafka/kafka_client_test.go
+++ b/pkg/mocks/kafka/kafka_client_test.go
@@ -17,35 +17,34 @@
 package kafka
 
 import (
-	"context"
+	"testing"
+	"time"
+
 	"github.com/opencord/voltha-lib-go/v3/pkg/kafka"
 	ic "github.com/opencord/voltha-protos/v3/go/inter_container"
 	"github.com/stretchr/testify/assert"
-	"testing"
-	"time"
 )
 
 func TestKafkaClientCreateTopic(t *testing.T) {
-	ctx := context.Background()
 	cTkc := NewKafkaClient()
 	topic := kafka.Topic{Name: "myTopic"}
-	err := cTkc.CreateTopic(ctx, &topic, 1, 1)
+	err := cTkc.CreateTopic(&topic, 1, 1)
 	assert.Nil(t, err)
-	err = cTkc.CreateTopic(ctx, &topic, 1, 1)
+	err = cTkc.CreateTopic(&topic, 1, 1)
 	assert.NotNil(t, err)
 }
 
 func TestKafkaClientDeleteTopic(t *testing.T) {
 	cTkc := NewKafkaClient()
 	topic := kafka.Topic{Name: "myTopic"}
-	err := cTkc.DeleteTopic(context.Background(), &topic)
+	err := cTkc.DeleteTopic(&topic)
 	assert.Nil(t, err)
 }
 
 func TestKafkaClientSubscribeSend(t *testing.T) {
 	cTkc := NewKafkaClient()
 	topic := kafka.Topic{Name: "myTopic"}
-	ch, err := cTkc.Subscribe(context.Background(), &topic)
+	ch, err := cTkc.Subscribe(&topic)
 	assert.Nil(t, err)
 	assert.NotNil(t, ch)
 	testCh := make(chan bool)
@@ -66,7 +65,7 @@
 			testCh <- false
 		}
 	}()
-	err = cTkc.Send(context.Background(), msg, &topic)
+	err = cTkc.Send(msg, &topic)
 	assert.Nil(t, err)
 	res := <-testCh
 	assert.True(t, res)
@@ -75,20 +74,20 @@
 func TestKafkaClientUnSubscribe(t *testing.T) {
 	cTkc := NewKafkaClient()
 	topic := kafka.Topic{Name: "myTopic"}
-	ch, err := cTkc.Subscribe(context.Background(), &topic)
+	ch, err := cTkc.Subscribe(&topic)
 	assert.Nil(t, err)
 	assert.NotNil(t, ch)
-	err = cTkc.UnSubscribe(context.Background(), &topic, ch)
+	err = cTkc.UnSubscribe(&topic, ch)
 	assert.Nil(t, err)
 }
 
 func TestKafkaClientStop(t *testing.T) {
 	cTkc := NewKafkaClient()
 	topic := kafka.Topic{Name: "myTopic"}
-	ch, err := cTkc.Subscribe(context.Background(), &topic)
+	ch, err := cTkc.Subscribe(&topic)
 	assert.Nil(t, err)
 	assert.NotNil(t, ch)
-	err = cTkc.UnSubscribe(context.Background(), &topic, ch)
+	err = cTkc.UnSubscribe(&topic, ch)
 	assert.Nil(t, err)
-	cTkc.Stop(context.Background())
+	cTkc.Stop()
 }
diff --git a/pkg/mocks/kafka/kafka_inter_container_proxy.go b/pkg/mocks/kafka/kafka_inter_container_proxy.go
index 2a1b5a1..34aec95 100644
--- a/pkg/mocks/kafka/kafka_inter_container_proxy.go
+++ b/pkg/mocks/kafka/kafka_inter_container_proxy.go
@@ -53,18 +53,18 @@
 	InvokeRpcSpy InvokeRpcSpy
 }
 
-func (s *MockKafkaICProxy) Start(ctx context.Context) error { return nil }
+func (s *MockKafkaICProxy) Start() error { return nil }
 func (s *MockKafkaICProxy) GetDefaultTopic() *kafka.Topic {
 	t := kafka.Topic{
 		Name: "test-topic",
 	}
 	return &t
 }
-func (s *MockKafkaICProxy) DeleteTopic(ctx context.Context, topic kafka.Topic) error { return nil }
-func (s *MockKafkaICProxy) DeviceDiscovered(ctx context.Context, deviceId string, deviceType string, parentId string, publisher string) error {
+func (s *MockKafkaICProxy) DeleteTopic(topic kafka.Topic) error { return nil }
+func (s *MockKafkaICProxy) DeviceDiscovered(deviceId string, deviceType string, parentId string, publisher string) error {
 	return nil
 }
-func (s *MockKafkaICProxy) Stop(ctx context.Context) {}
+func (s *MockKafkaICProxy) Stop() {}
 
 func (s *MockKafkaICProxy) InvokeAsyncRPC(ctx context.Context, rpc string, toTopic *kafka.Topic, replyToTopic *kafka.Topic,
 	waitForResponse bool, key string, kvArgs ...*kafka.KVArg) chan *kafka.RpcResponse {
@@ -122,16 +122,12 @@
 
 	return success, &response
 }
-func (s *MockKafkaICProxy) SubscribeWithRequestHandlerInterface(ctx context.Context, topic kafka.Topic, handler interface{}) error {
+func (s *MockKafkaICProxy) SubscribeWithRequestHandlerInterface(topic kafka.Topic, handler interface{}) error {
 	return nil
 }
-func (s *MockKafkaICProxy) SubscribeWithDefaultRequestHandler(ctx context.Context, topic kafka.Topic, initialOffset int64) error {
+func (s *MockKafkaICProxy) SubscribeWithDefaultRequestHandler(topic kafka.Topic, initialOffset int64) error {
 	return nil
 }
-func (s *MockKafkaICProxy) UnSubscribeFromRequestHandler(ctx context.Context, topic kafka.Topic) error {
-	return nil
-}
-func (s *MockKafkaICProxy) EnableLivenessChannel(ctx context.Context, enable bool) chan bool {
-	return nil
-}
-func (s *MockKafkaICProxy) SendLiveness(ctx context.Context) error { return nil }
+func (s *MockKafkaICProxy) UnSubscribeFromRequestHandler(topic kafka.Topic) error { return nil }
+func (s *MockKafkaICProxy) EnableLivenessChannel(enable bool) chan bool           { return nil }
+func (s *MockKafkaICProxy) SendLiveness() error                                   { return nil }
diff --git a/pkg/ponresourcemanager/common.go b/pkg/ponresourcemanager/common.go
index 113b39c..0f4339e 100644
--- a/pkg/ponresourcemanager/common.go
+++ b/pkg/ponresourcemanager/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.CLogger
+var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "ponresourcemanager"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "ponresourcemanager"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/ponresourcemanager/ponresourcemanager.go b/pkg/ponresourcemanager/ponresourcemanager.go
index baff575..5c10b5e 100755
--- a/pkg/ponresourcemanager/ponresourcemanager.go
+++ b/pkg/ponresourcemanager/ponresourcemanager.go
@@ -154,23 +154,23 @@
 	Globalorlocal      string
 }
 
-func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
-	logger.Infow(ctx, "kv-store-type", log.Fields{"store": storeType})
+func newKVClient(storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
+	logger.Infow("kv-store-type", log.Fields{"store": storeType})
 	switch storeType {
 	case "consul":
-		return kvstore.NewConsulClient(ctx, address, timeout)
+		return kvstore.NewConsulClient(address, timeout)
 	case "etcd":
-		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
+		return kvstore.NewEtcdClient(address, timeout, log.WarnLevel)
 	}
 	return nil, errors.New("unsupported-kv-store")
 }
 
-func SetKVClient(ctx context.Context, Technology string, Backend string, Addr string, configClient bool) *db.Backend {
+func SetKVClient(Technology string, Backend string, Addr string, configClient bool) *db.Backend {
 	// TODO : Make sure direct call to NewBackend is working fine with backend , currently there is some
 	// issue between kv store and backend , core is not calling NewBackend directly
-	kvClient, err := newKVClient(ctx, Backend, Addr, KVSTORE_RETRY_TIMEOUT)
+	kvClient, err := newKVClient(Backend, Addr, KVSTORE_RETRY_TIMEOUT)
 	if err != nil {
-		logger.Fatalw(ctx, "Failed to init KV client\n", log.Fields{"err": err})
+		logger.Fatalw("Failed to init KV client\n", log.Fields{"err": err})
 		return nil
 	}
 
@@ -192,27 +192,27 @@
 }
 
 // NewPONResourceManager creates a new PON resource manager.
-func NewPONResourceManager(ctx context.Context, Technology string, DeviceType string, DeviceID string, Backend string, Address string) (*PONResourceManager, error) {
+func NewPONResourceManager(Technology string, DeviceType string, DeviceID string, Backend string, Address string) (*PONResourceManager, error) {
 	var PONMgr PONResourceManager
 	PONMgr.Technology = Technology
 	PONMgr.DeviceType = DeviceType
 	PONMgr.DeviceID = DeviceID
 	PONMgr.Backend = Backend
 	PONMgr.Address = Address
-	PONMgr.KVStore = SetKVClient(ctx, Technology, Backend, Address, false)
+	PONMgr.KVStore = SetKVClient(Technology, Backend, Address, false)
 	if PONMgr.KVStore == nil {
-		logger.Error(ctx, "KV Client initilization failed")
+		logger.Error("KV Client initilization failed")
 		return nil, errors.New("Failed to init KV client")
 	}
 	// init kv client to read from the config path
-	PONMgr.KVStoreForConfig = SetKVClient(ctx, Technology, Backend, Address, true)
+	PONMgr.KVStoreForConfig = SetKVClient(Technology, Backend, Address, true)
 	if PONMgr.KVStoreForConfig == nil {
-		logger.Error(ctx, "KV Config Client initilization failed")
+		logger.Error("KV Config Client initilization failed")
 		return nil, errors.New("Failed to init KV Config client")
 	}
 	// Initialize techprofile for this technology
-	if PONMgr.TechProfileMgr, _ = tp.NewTechProfile(ctx, &PONMgr, Backend, Address); PONMgr.TechProfileMgr == nil {
-		logger.Error(ctx, "Techprofile initialization failed")
+	if PONMgr.TechProfileMgr, _ = tp.NewTechProfile(&PONMgr, Backend, Address); PONMgr.TechProfileMgr == nil {
+		logger.Error("Techprofile initialization failed")
 		return nil, errors.New("Failed to init tech profile")
 	}
 	PONMgr.PonResourceRanges = make(map[string]interface{})
@@ -240,36 +240,36 @@
 	// Try to initialize the PON Resource Ranges from KV store based on the
 	// OLT model key, if available
 	if PONRMgr.OLTModel == "" {
-		logger.Error(ctx, "Failed to get OLT model")
+		logger.Error("Failed to get OLT model")
 		return false
 	}
 	Path := fmt.Sprintf(PON_RESOURCE_RANGE_CONFIG_PATH, PONRMgr.OLTModel)
 	//get resource from kv store
 	Result, err := PONRMgr.KVStore.Get(ctx, Path)
 	if err != nil {
-		logger.Debugf(ctx, "Error in fetching resource %s from KV strore", Path)
+		logger.Debugf("Error in fetching resource %s from KV strore", Path)
 		return false
 	}
 	if Result == nil {
-		logger.Debug(ctx, "There may be no resources in the KV store in case of fresh bootup, return true")
+		logger.Debug("There may be no resources in the KV store in case of fresh bootup, return true")
 		return false
 	}
 	//update internal ranges from kv ranges. If there are missing
 	// values in the KV profile, continue to use the defaults
 	Value, err := ToByte(Result.Value)
 	if err != nil {
-		logger.Error(ctx, "Failed to convert kvpair to byte string")
+		logger.Error("Failed to convert kvpair to byte string")
 		return false
 	}
 	if err := json.Unmarshal(Value, &PONRMgr.PonResourceRanges); err != nil {
-		logger.Error(ctx, "Failed to Unmarshal json byte")
+		logger.Error("Failed to Unmarshal json byte")
 		return false
 	}
-	logger.Debug(ctx, "Init resource ranges from kvstore success")
+	logger.Debug("Init resource ranges from kvstore success")
 	return true
 }
 
-func (PONRMgr *PONResourceManager) UpdateRanges(ctx context.Context, StartIDx string, StartID uint32, EndIDx string, EndID uint32,
+func (PONRMgr *PONResourceManager) UpdateRanges(StartIDx string, StartID uint32, EndIDx string, EndID uint32,
 	SharedIDx string, SharedPoolID uint32, RMgr *PONResourceManager) {
 	/*
 	   Update the ranges for all reosurce type in the intermnal maps
@@ -281,7 +281,7 @@
 	   param: shared pool id
 	   param: global resource manager
 	*/
-	logger.Debugf(ctx, "update ranges for %s, %d", StartIDx, StartID)
+	logger.Debugf("update ranges for %s, %d", StartIDx, StartID)
 
 	if StartID != 0 {
 		if (PONRMgr.PonResourceRanges[StartIDx] == nil) || (PONRMgr.PonResourceRanges[StartIDx].(uint32) < StartID) {
@@ -301,8 +301,7 @@
 	}
 }
 
-func (PONRMgr *PONResourceManager) InitDefaultPONResourceRanges(ctx context.Context,
-	ONUIDStart uint32,
+func (PONRMgr *PONResourceManager) InitDefaultPONResourceRanges(ONUIDStart uint32,
 	ONUIDEnd uint32,
 	ONUIDSharedPoolID uint32,
 	AllocIDStart uint32,
@@ -336,12 +335,12 @@
 	  :param num_of_pon_ports: number of PON ports
 	  :param intf_ids: interfaces serviced by this manager
 	*/
-	PONRMgr.UpdateRanges(ctx, ONU_ID_START_IDX, ONUIDStart, ONU_ID_END_IDX, ONUIDEnd, ONU_ID_SHARED_IDX, ONUIDSharedPoolID, nil)
-	PONRMgr.UpdateRanges(ctx, ALLOC_ID_START_IDX, AllocIDStart, ALLOC_ID_END_IDX, AllocIDEnd, ALLOC_ID_SHARED_IDX, AllocIDSharedPoolID, nil)
-	PONRMgr.UpdateRanges(ctx, GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
-	PONRMgr.UpdateRanges(ctx, FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
-	PONRMgr.UpdateRanges(ctx, UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
-	logger.Debug(ctx, "Initialize default range values")
+	PONRMgr.UpdateRanges(ONU_ID_START_IDX, ONUIDStart, ONU_ID_END_IDX, ONUIDEnd, ONU_ID_SHARED_IDX, ONUIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(ALLOC_ID_START_IDX, AllocIDStart, ALLOC_ID_END_IDX, AllocIDEnd, ALLOC_ID_SHARED_IDX, AllocIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(GEMPORT_ID_START_IDX, GEMPortIDStart, GEMPORT_ID_END_IDX, GEMPortIDEnd, GEMPORT_ID_SHARED_IDX, GEMPortIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(FLOW_ID_START_IDX, FlowIDStart, FLOW_ID_END_IDX, FlowIDEnd, FLOW_ID_SHARED_IDX, FlowIDSharedPoolID, nil)
+	PONRMgr.UpdateRanges(UNI_ID_START_IDX, UNIIDStart, UNI_ID_END_IDX, UNIIDEnd, "", 0, nil)
+	logger.Debug("Initialize default range values")
 	var i uint32
 	if IntfIDs == nil {
 		for i = 0; i < NoOfPONPorts; i++ {
@@ -357,7 +356,7 @@
 
 	//Initialize resource pool for all PON ports.
 
-	logger.Debug(ctx, "Init resource ranges")
+	logger.Debug("Init resource ranges")
 
 	var err error
 	for _, Intf := range PONRMgr.IntfIDs {
@@ -368,7 +367,7 @@
 		if err = PONRMgr.InitResourceIDPool(ctx, Intf, ONU_ID,
 			PONRMgr.PonResourceRanges[ONU_ID_START_IDX].(uint32),
 			PONRMgr.PonResourceRanges[ONU_ID_END_IDX].(uint32)); err != nil {
-			logger.Error(ctx, "Failed to init ONU ID resource pool")
+			logger.Error("Failed to init ONU ID resource pool")
 			return err
 		}
 		if SharedPoolID != 0 {
@@ -384,7 +383,7 @@
 		if err = PONRMgr.InitResourceIDPool(ctx, Intf, ALLOC_ID,
 			PONRMgr.PonResourceRanges[ALLOC_ID_START_IDX].(uint32),
 			PONRMgr.PonResourceRanges[ALLOC_ID_END_IDX].(uint32)); err != nil {
-			logger.Error(ctx, "Failed to init ALLOC ID resource pool ")
+			logger.Error("Failed to init ALLOC ID resource pool ")
 			return err
 		}
 		if SharedPoolID != 0 {
@@ -399,7 +398,7 @@
 		if err = PONRMgr.InitResourceIDPool(ctx, Intf, GEMPORT_ID,
 			PONRMgr.PonResourceRanges[GEMPORT_ID_START_IDX].(uint32),
 			PONRMgr.PonResourceRanges[GEMPORT_ID_END_IDX].(uint32)); err != nil {
-			logger.Error(ctx, "Failed to init GEMPORT ID resource pool")
+			logger.Error("Failed to init GEMPORT ID resource pool")
 			return err
 		}
 		if SharedPoolID != 0 {
@@ -415,7 +414,7 @@
 		if err = PONRMgr.InitResourceIDPool(ctx, Intf, FLOW_ID,
 			PONRMgr.PonResourceRanges[FLOW_ID_START_IDX].(uint32),
 			PONRMgr.PonResourceRanges[FLOW_ID_END_IDX].(uint32)); err != nil {
-			logger.Error(ctx, "Failed to init FLOW ID resource pool")
+			logger.Error("Failed to init FLOW ID resource pool")
 			return err
 		}
 		if SharedPoolID != 0 {
@@ -429,7 +428,7 @@
 
 	//Clear resource pool for all PON ports.
 
-	logger.Debug(ctx, "Clear resource ranges")
+	logger.Debug("Clear resource ranges")
 
 	for _, Intf := range PONRMgr.IntfIDs {
 		SharedPoolID := PONRMgr.PonResourceRanges[ONU_ID_SHARED_IDX].(uint32)
@@ -437,7 +436,7 @@
 			Intf = SharedPoolID
 		}
 		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, ONU_ID); !status {
-			logger.Error(ctx, "Failed to clear ONU ID resource pool")
+			logger.Error("Failed to clear ONU ID resource pool")
 			return errors.New("Failed to clear ONU ID resource pool")
 		}
 		if SharedPoolID != 0 {
@@ -451,7 +450,7 @@
 			Intf = SharedPoolID
 		}
 		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, ALLOC_ID); !status {
-			logger.Error(ctx, "Failed to clear ALLOC ID resource pool ")
+			logger.Error("Failed to clear ALLOC ID resource pool ")
 			return errors.New("Failed to clear ALLOC ID resource pool")
 		}
 		if SharedPoolID != 0 {
@@ -464,7 +463,7 @@
 			Intf = SharedPoolID
 		}
 		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, GEMPORT_ID); !status {
-			logger.Error(ctx, "Failed to clear GEMPORT ID resource pool")
+			logger.Error("Failed to clear GEMPORT ID resource pool")
 			return errors.New("Failed to clear GEMPORT ID resource pool")
 		}
 		if SharedPoolID != 0 {
@@ -478,7 +477,7 @@
 			Intf = SharedPoolID
 		}
 		if status := PONRMgr.ClearResourceIDPool(ctx, Intf, FLOW_ID); !status {
-			logger.Error(ctx, "Failed to clear FLOW ID resource pool")
+			logger.Error("Failed to clear FLOW ID resource pool")
 			return errors.New("Failed to clear FLOW ID resource pool")
 		}
 		if SharedPoolID != 0 {
@@ -505,9 +504,9 @@
 		return SharedResourceMgr.InitResourceIDPool(ctx, Intf, ResourceType, StartID, EndID)
 	}
 
-	Path := PONRMgr.GetPath(ctx, Intf, ResourceType)
+	Path := PONRMgr.GetPath(Intf, ResourceType)
 	if Path == "" {
-		logger.Errorf(ctx, "Failed to get path for resource type %s", ResourceType)
+		logger.Errorf("Failed to get path for resource type %s", ResourceType)
 		return fmt.Errorf("Failed to get path for resource type %s", ResourceType)
 	}
 
@@ -515,7 +514,7 @@
 	//checked for its presence if not kv store update happens
 	Res, err := PONRMgr.GetResource(ctx, Path)
 	if (err == nil) && (Res != nil) {
-		logger.Debugf(ctx, "Resource %s already present in store ", Path)
+		logger.Debugf("Resource %s already present in store ", Path)
 		return nil
 	} else {
 		var excluded []uint32
@@ -523,23 +522,23 @@
 			//get gem port ids defined in the KV store, if any, and exclude them from the gem port id pool
 			if reservedGemPortIds, defined := PONRMgr.getReservedGemPortIdsFromKVStore(ctx); defined {
 				excluded = reservedGemPortIds
-				logger.Debugw(ctx, "Excluding some ports from GEM port id pool", log.Fields{"excluded gem ports": excluded})
+				logger.Debugw("Excluding some ports from GEM port id pool", log.Fields{"excluded gem ports": excluded})
 			}
 		}
-		FormatResult, err := PONRMgr.FormatResource(ctx, Intf, StartID, EndID, excluded)
+		FormatResult, err := PONRMgr.FormatResource(Intf, StartID, EndID, excluded)
 		if err != nil {
-			logger.Errorf(ctx, "Failed to format resource")
+			logger.Errorf("Failed to format resource")
 			return err
 		}
 		// Add resource as json in kv store.
 		err = PONRMgr.KVStore.Put(ctx, Path, FormatResult)
 		if err == nil {
-			logger.Debug(ctx, "Successfuly posted to kv store")
+			logger.Debug("Successfuly posted to kv store")
 			return err
 		}
 	}
 
-	logger.Debug(ctx, "Error initializing pool")
+	logger.Debug("Error initializing pool")
 
 	return err
 }
@@ -549,7 +548,7 @@
 	// read reserved gem ports from the config path
 	KvPair, err := PONRMgr.KVStoreForConfig.Get(ctx, RESERVED_GEMPORT_IDS_PATH)
 	if err != nil {
-		logger.Errorw(ctx, "Unable to get reserved GEM port ids from the kv store", log.Fields{"err": err})
+		logger.Errorw("Unable to get reserved GEM port ids from the kv store", log.Fields{"err": err})
 		return reservedGemPortIds, false
 	}
 	if KvPair == nil || KvPair.Value == nil {
@@ -558,17 +557,17 @@
 	}
 	Val, err := kvstore.ToByte(KvPair.Value)
 	if err != nil {
-		logger.Errorw(ctx, "Failed to convert reserved gem port ids into byte array", log.Fields{"err": err})
+		logger.Errorw("Failed to convert reserved gem port ids into byte array", log.Fields{"err": err})
 		return reservedGemPortIds, false
 	}
 	if err = json.Unmarshal(Val, &reservedGemPortIds); err != nil {
-		logger.Errorw(ctx, "Failed to unmarshal reservedGemPortIds", log.Fields{"err": err})
+		logger.Errorw("Failed to unmarshal reservedGemPortIds", log.Fields{"err": err})
 		return reservedGemPortIds, false
 	}
 	return reservedGemPortIds, true
 }
 
-func (PONRMgr *PONResourceManager) FormatResource(ctx context.Context, IntfID uint32, StartIDx uint32, EndIDx uint32,
+func (PONRMgr *PONResourceManager) FormatResource(IntfID uint32, StartIDx uint32, EndIDx uint32,
 	Excluded []uint32) ([]byte, error) {
 	/*
 	   Format resource as json.
@@ -590,22 +589,22 @@
 	*/
 	var TSData *bitmap.Threadsafe
 	if TSData = bitmap.NewTS(int(EndIDx)); TSData == nil {
-		logger.Error(ctx, "Failed to create a bitmap")
+		logger.Error("Failed to create a bitmap")
 		return nil, errors.New("Failed to create bitmap")
 	}
 	for _, excludedID := range Excluded {
 		if excludedID < StartIDx || excludedID > EndIDx {
-			logger.Warnf(ctx, "Cannot reserve %d. It must be in the range of [%d, %d]", excludedID,
+			logger.Warnf("Cannot reserve %d. It must be in the range of [%d, %d]", excludedID,
 				StartIDx, EndIDx)
 			continue
 		}
-		PONRMgr.reserveID(ctx, TSData, StartIDx, excludedID)
+		PONRMgr.reserveID(TSData, StartIDx, excludedID)
 	}
 	Resource[POOL] = TSData.Data(false) //we pass false so as the TSData lib api does not do a copy of the data and return
 
 	Value, err := json.Marshal(Resource)
 	if err != nil {
-		logger.Errorf(ctx, "Failed to marshall resource")
+		logger.Errorf("Failed to marshall resource")
 		return nil, err
 	}
 	return Value, err
@@ -625,7 +624,7 @@
 
 	Resource, err := PONRMgr.KVStore.Get(ctx, Path)
 	if (err != nil) || (Resource == nil) {
-		logger.Debugf(ctx, "Resource  unavailable at %s", Path)
+		logger.Debugf("Resource  unavailable at %s", Path)
 		return nil, err
 	}
 
@@ -637,7 +636,7 @@
 	// decode resource fetched from backend store to dictionary
 	err = json.Unmarshal(Value, &Result)
 	if err != nil {
-		logger.Error(ctx, "Failed to decode resource")
+		logger.Error("Failed to decode resource")
 		return Result, err
 	}
 	/*
@@ -647,20 +646,20 @@
 	*/
 	Str, err = ToString(Result[POOL])
 	if err != nil {
-		logger.Error(ctx, "Failed to conver to kv pair to string")
+		logger.Error("Failed to conver to kv pair to string")
 		return Result, err
 	}
 	Decode64, _ := base64.StdEncoding.DecodeString(Str)
 	Result[POOL], err = ToByte(Decode64)
 	if err != nil {
-		logger.Error(ctx, "Failed to convert resource pool to byte")
+		logger.Error("Failed to convert resource pool to byte")
 		return Result, err
 	}
 
 	return Result, err
 }
 
-func (PONRMgr *PONResourceManager) GetPath(ctx context.Context, IntfID uint32, ResourceType string) string {
+func (PONRMgr *PONResourceManager) GetPath(IntfID uint32, ResourceType string) string {
 	/*
 	   Get path for given resource type.
 	   :param pon_intf_id: OLT PON interface id
@@ -686,7 +685,7 @@
 	} else if ResourceType == FLOW_ID {
 		Path = fmt.Sprintf(FLOW_ID_POOL_PATH, PONRMgr.DeviceID, IntfID)
 	} else {
-		logger.Error(ctx, "Invalid resource pool identifier")
+		logger.Error("Invalid resource pool identifier")
 	}
 	return Path
 }
@@ -701,7 +700,7 @@
 	    alloc_id/gemport_id, onu_id or invalid type respectively
 	*/
 	if NumIDs < 1 {
-		logger.Error(ctx, "Invalid number of resources requested")
+		logger.Error("Invalid number of resources requested")
 		return nil, fmt.Errorf("Invalid number of resources requested %d", NumIDs)
 	}
 	// delegate to the master instance if sharing enabled across instances
@@ -710,34 +709,34 @@
 	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
 		return SharedResourceMgr.GetResourceID(ctx, IntfID, ResourceType, NumIDs)
 	}
-	logger.Debugf(ctx, "Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
+	logger.Debugf("Fetching resource from %s rsrc mgr for resource %s", PONRMgr.Globalorlocal, ResourceType)
 
-	Path := PONRMgr.GetPath(ctx, IntfID, ResourceType)
+	Path := PONRMgr.GetPath(IntfID, ResourceType)
 	if Path == "" {
-		logger.Errorf(ctx, "Failed to get path for resource type %s", ResourceType)
+		logger.Errorf("Failed to get path for resource type %s", ResourceType)
 		return nil, fmt.Errorf("Failed to get path for resource type %s", ResourceType)
 	}
-	logger.Debugf(ctx, "Get resource for type %s on path %s", ResourceType, Path)
+	logger.Debugf("Get resource for type %s on path %s", ResourceType, Path)
 	var Result []uint32
 	var NextID uint32
 	Resource, err := PONRMgr.GetResource(ctx, Path)
 	if (err == nil) && (ResourceType == ONU_ID) || (ResourceType == FLOW_ID) {
-		if NextID, err = PONRMgr.GenerateNextID(ctx, Resource); err != nil {
-			logger.Error(ctx, "Failed to Generate ID")
+		if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+			logger.Error("Failed to Generate ID")
 			return Result, err
 		}
 		Result = append(Result, NextID)
 	} else if (err == nil) && ((ResourceType == GEMPORT_ID) || (ResourceType == ALLOC_ID)) {
 		if NumIDs == 1 {
-			if NextID, err = PONRMgr.GenerateNextID(ctx, Resource); err != nil {
-				logger.Error(ctx, "Failed to Generate ID")
+			if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+				logger.Error("Failed to Generate ID")
 				return Result, err
 			}
 			Result = append(Result, NextID)
 		} else {
 			for NumIDs > 0 {
-				if NextID, err = PONRMgr.GenerateNextID(ctx, Resource); err != nil {
-					logger.Error(ctx, "Failed to Generate ID")
+				if NextID, err = PONRMgr.GenerateNextID(Resource); err != nil {
+					logger.Error("Failed to Generate ID")
 					return Result, err
 				}
 				Result = append(Result, NextID)
@@ -745,13 +744,13 @@
 			}
 		}
 	} else {
-		logger.Error(ctx, "get resource failed")
+		logger.Error("get resource failed")
 		return Result, err
 	}
 
 	//Update resource in kv store
 	if PONRMgr.UpdateResource(ctx, Path, Resource) != nil {
-		logger.Errorf(ctx, "Failed to update resource %s", Path)
+		logger.Errorf("Failed to update resource %s", Path)
 		return nil, fmt.Errorf("Failed to update resource %s", Path)
 	}
 	return Result, nil
@@ -777,11 +776,11 @@
 	   :return boolean: True if all IDs in given release_content release else False
 	*/
 	if !checkValidResourceType(ResourceType) {
-		logger.Error(ctx, "Invalid resource type")
+		logger.Error("Invalid resource type")
 		return false
 	}
 	if ReleaseContent == nil {
-		logger.Debug(ctx, "Nothing to release")
+		logger.Debug("Nothing to release")
 		return true
 	}
 	// delegate to the master instance if sharing enabled across instances
@@ -789,21 +788,21 @@
 	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
 		return SharedResourceMgr.FreeResourceID(ctx, IntfID, ResourceType, ReleaseContent)
 	}
-	Path := PONRMgr.GetPath(ctx, IntfID, ResourceType)
+	Path := PONRMgr.GetPath(IntfID, ResourceType)
 	if Path == "" {
-		logger.Error(ctx, "Failed to get path")
+		logger.Error("Failed to get path")
 		return false
 	}
 	Resource, err := PONRMgr.GetResource(ctx, Path)
 	if err != nil {
-		logger.Error(ctx, "Failed to get resource")
+		logger.Error("Failed to get resource")
 		return false
 	}
 	for _, Val := range ReleaseContent {
-		PONRMgr.ReleaseID(ctx, Resource, Val)
+		PONRMgr.ReleaseID(Resource, Val)
 	}
 	if PONRMgr.UpdateResource(ctx, Path, Resource) != nil {
-		logger.Errorf(ctx, "Free resource for %s failed", Path)
+		logger.Errorf("Free resource for %s failed", Path)
 		return false
 	}
 	return true
@@ -819,12 +818,12 @@
 	// TODO resource[POOL] = resource[POOL].bin
 	Value, err := json.Marshal(Resource)
 	if err != nil {
-		logger.Error(ctx, "failed to Marshal")
+		logger.Error("failed to Marshal")
 		return err
 	}
 	err = PONRMgr.KVStore.Put(ctx, Path, Value)
 	if err != nil {
-		logger.Error(ctx, "failed to put data to kv store %s", Path)
+		logger.Error("failed to put data to kv store %s", Path)
 		return err
 	}
 	return nil
@@ -841,17 +840,17 @@
 	if SharedResourceMgr != nil && PONRMgr != SharedResourceMgr {
 		return SharedResourceMgr.ClearResourceIDPool(ctx, contIntfID, ResourceType)
 	}
-	Path := PONRMgr.GetPath(ctx, contIntfID, ResourceType)
+	Path := PONRMgr.GetPath(contIntfID, ResourceType)
 	if Path == "" {
-		logger.Error(ctx, "Failed to get path")
+		logger.Error("Failed to get path")
 		return false
 	}
 
 	if err := PONRMgr.KVStore.Delete(ctx, Path); err != nil {
-		logger.Errorf(ctx, "Failed to delete resource %s", Path)
+		logger.Errorf("Failed to delete resource %s", Path)
 		return false
 	}
-	logger.Debugf(ctx, "Cleared resource %s", Path)
+	logger.Debugf("Cleared resource %s", Path)
 	return true
 }
 
@@ -865,7 +864,7 @@
 	var AllocIDs []byte
 	Result := PONRMgr.KVStore.Put(ctx, AllocIDPath, AllocIDs)
 	if Result != nil {
-		logger.Error(ctx, "Failed to update the KV store")
+		logger.Error("Failed to update the KV store")
 		return
 	}
 	// initialize pon_intf_onu_id tuple to gemport_ids map
@@ -873,7 +872,7 @@
 	var GEMPortIDs []byte
 	Result = PONRMgr.KVStore.Put(ctx, GEMPortIDPath, GEMPortIDs)
 	if Result != nil {
-		logger.Error(ctx, "Failed to update the KV store")
+		logger.Error("Failed to update the KV store")
 		return
 	}
 }
@@ -887,14 +886,14 @@
 	var err error
 	AllocIDPath := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
 	if err = PONRMgr.KVStore.Delete(ctx, AllocIDPath); err != nil {
-		logger.Errorf(ctx, "Failed to remove resource %s", AllocIDPath)
+		logger.Errorf("Failed to remove resource %s", AllocIDPath)
 		return false
 	}
 	// remove pon_intf_onu_id tuple to gemport_ids map
 	GEMPortIDPath := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, PONIntfONUID)
 	err = PONRMgr.KVStore.Delete(ctx, GEMPortIDPath)
 	if err != nil {
-		logger.Errorf(ctx, "Failed to remove resource %s", GEMPortIDPath)
+		logger.Errorf("Failed to remove resource %s", GEMPortIDPath)
 		return false
 	}
 
@@ -903,14 +902,14 @@
 		for _, Flow := range FlowIDs {
 			FlowIDInfoPath := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, PONIntfONUID, Flow.Value)
 			if err = PONRMgr.KVStore.Delete(ctx, FlowIDInfoPath); err != nil {
-				logger.Errorf(ctx, "Failed to remove resource %s", FlowIDInfoPath)
+				logger.Errorf("Failed to remove resource %s", FlowIDInfoPath)
 				return false
 			}
 		}
 	}
 
 	if err = PONRMgr.KVStore.Delete(ctx, FlowIDPath); err != nil {
-		logger.Errorf(ctx, "Failed to remove resource %s", FlowIDPath)
+		logger.Errorf("Failed to remove resource %s", FlowIDPath)
 		return false
 	}
 
@@ -931,11 +930,11 @@
 		if Value != nil {
 			Val, err := ToByte(Value.Value)
 			if err != nil {
-				logger.Errorw(ctx, "Failed to convert into byte array", log.Fields{"error": err})
+				logger.Errorw("Failed to convert into byte array", log.Fields{"error": err})
 				return Data
 			}
 			if err = json.Unmarshal(Val, &Data); err != nil {
-				logger.Error(ctx, "Failed to unmarshal", log.Fields{"error": err})
+				logger.Error("Failed to unmarshal", log.Fields{"error": err})
 				return Data
 			}
 		}
@@ -951,19 +950,19 @@
 	*/
 
 	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
-	logger.Debugf(ctx, "Getting current gemports for %s", Path)
+	logger.Debugf("Getting current gemports for %s", Path)
 	var Data []uint32
 	Value, err := PONRMgr.KVStore.Get(ctx, Path)
 	if err == nil {
 		if Value != nil {
 			Val, _ := ToByte(Value.Value)
 			if err = json.Unmarshal(Val, &Data); err != nil {
-				logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"error": err})
+				logger.Errorw("Failed to unmarshal", log.Fields{"error": err})
 				return Data
 			}
 		}
 	} else {
-		logger.Errorf(ctx, "Failed to get data from kvstore for %s", Path)
+		logger.Errorf("Failed to get data from kvstore for %s", Path)
 	}
 	return Data
 }
@@ -983,7 +982,7 @@
 		if Value != nil {
 			Val, _ := ToByte(Value.Value)
 			if err = json.Unmarshal(Val, &Data); err != nil {
-				logger.Error(ctx, "Failed to unmarshal")
+				logger.Error("Failed to unmarshal")
 				return Data
 			}
 		}
@@ -1007,11 +1006,11 @@
 		if Value != nil {
 			Val, err := ToByte(Value.Value)
 			if err != nil {
-				logger.Errorw(ctx, "Failed to convert flowinfo into byte array", log.Fields{"error": err})
+				logger.Errorw("Failed to convert flowinfo into byte array", log.Fields{"error": err})
 				return err
 			}
 			if err = json.Unmarshal(Val, Data); err != nil {
-				logger.Errorw(ctx, "Failed to unmarshal", log.Fields{"error": err})
+				logger.Errorw("Failed to unmarshal", log.Fields{"error": err})
 				return err
 			}
 		}
@@ -1028,7 +1027,7 @@
 	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
 
 	if err := PONRMgr.KVStore.Delete(ctx, Path); err != nil {
-		logger.Errorf(ctx, "Falied to remove resource %s", Path)
+		logger.Errorf("Falied to remove resource %s", Path)
 		return false
 	}
 	return true
@@ -1045,12 +1044,12 @@
 	Path := fmt.Sprintf(ALLOC_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
 	Value, err = json.Marshal(AllocIDs)
 	if err != nil {
-		logger.Error(ctx, "failed to Marshal")
+		logger.Error("failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		logger.Errorf(ctx, "Failed to update resource %s", Path)
+		logger.Errorf("Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1066,15 +1065,15 @@
 	var Value []byte
 	var err error
 	Path := fmt.Sprintf(GEMPORT_ID_RESOURCE_MAP_PATH, PONRMgr.DeviceID, IntfONUID)
-	logger.Debugf(ctx, "Updating gemport ids for %s", Path)
+	logger.Debugf("Updating gemport ids for %s", Path)
 	Value, err = json.Marshal(GEMPortIDs)
 	if err != nil {
-		logger.Error(ctx, "failed to Marshal")
+		logger.Error("failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		logger.Errorf(ctx, "Failed to update resource %s", Path)
+		logger.Errorf("Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1125,12 +1124,12 @@
 	}
 	Value, err = json.Marshal(FlowIDs)
 	if err != nil {
-		logger.Error(ctx, "Failed to Marshal")
+		logger.Error("Failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		logger.Errorf(ctx, "Failed to update resource %s", Path)
+		logger.Errorf("Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1149,18 +1148,18 @@
 	Path := fmt.Sprintf(FLOW_ID_INFO_PATH, PONRMgr.DeviceID, IntfONUID, FlowID)
 	Value, err = json.Marshal(FlowData)
 	if err != nil {
-		logger.Error(ctx, "failed to Marshal")
+		logger.Error("failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		logger.Errorf(ctx, "Failed to update resource %s", Path)
+		logger.Errorf("Failed to update resource %s", Path)
 		return err
 	}
 	return err
 }
 
-func (PONRMgr *PONResourceManager) GenerateNextID(ctx context.Context, Resource map[string]interface{}) (uint32, error) {
+func (PONRMgr *PONResourceManager) GenerateNextID(Resource map[string]interface{}) (uint32, error) {
 	/*
 	   Generate unique id having OFFSET as start
 	   :param resource: resource used to generate ID
@@ -1168,12 +1167,12 @@
 	*/
 	ByteArray, err := ToByte(Resource[POOL])
 	if err != nil {
-		logger.Error(ctx, "Failed to convert resource to byte array")
+		logger.Error("Failed to convert resource to byte array")
 		return 0, err
 	}
 	Data := bitmap.TSFromData(ByteArray, false)
 	if Data == nil {
-		logger.Error(ctx, "Failed to get data from byte array")
+		logger.Error("Failed to get data from byte array")
 		return 0, errors.New("Failed to get data from byte array")
 	}
 
@@ -1187,11 +1186,11 @@
 	Data.Set(Idx, true)
 	res := uint32(Resource[START_IDX].(float64))
 	Resource[POOL] = Data.Data(false)
-	logger.Debugf(ctx, "Generated ID for %d", (uint32(Idx) + res))
+	logger.Debugf("Generated ID for %d", (uint32(Idx) + res))
 	return (uint32(Idx) + res), err
 }
 
-func (PONRMgr *PONResourceManager) ReleaseID(ctx context.Context, Resource map[string]interface{}, Id uint32) bool {
+func (PONRMgr *PONResourceManager) ReleaseID(Resource map[string]interface{}, Id uint32) bool {
 	/*
 	   Release unique id having OFFSET as start index.
 	   :param resource: resource used to release ID
@@ -1199,12 +1198,12 @@
 	*/
 	ByteArray, err := ToByte(Resource[POOL])
 	if err != nil {
-		logger.Error(ctx, "Failed to convert resource to byte array")
+		logger.Error("Failed to convert resource to byte array")
 		return false
 	}
 	Data := bitmap.TSFromData(ByteArray, false)
 	if Data == nil {
-		logger.Error(ctx, "Failed to get resource pool")
+		logger.Error("Failed to get resource pool")
 		return false
 	}
 	Idx := Id - uint32(Resource[START_IDX].(float64))
@@ -1218,10 +1217,10 @@
 :param Resource: resource used to reserve ID
 :param Id: ID to be reserved
 */
-func (PONRMgr *PONResourceManager) reserveID(ctx context.Context, TSData *bitmap.Threadsafe, StartIndex uint32, Id uint32) bool {
+func (PONRMgr *PONResourceManager) reserveID(TSData *bitmap.Threadsafe, StartIndex uint32, Id uint32) bool {
 	Data := bitmap.TSFromData(TSData.Data(false), false)
 	if Data == nil {
-		logger.Error(ctx, "Failed to get resource pool")
+		logger.Error("Failed to get resource pool")
 		return false
 	}
 	Idx := Id - StartIndex
@@ -1278,12 +1277,12 @@
 	Path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, intfID)
 	Value, err = json.Marshal(onuGemData)
 	if err != nil {
-		logger.Error(ctx, "failed to Marshal")
+		logger.Error("failed to Marshal")
 		return err
 	}
 
 	if err = PONRMgr.KVStore.Put(ctx, Path, Value); err != nil {
-		logger.Errorf(ctx, "Failed to update resource %s", Path)
+		logger.Errorf("Failed to update resource %s", Path)
 		return err
 	}
 	return err
@@ -1300,22 +1299,22 @@
 	path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, IntfId)
 	value, err := PONRMgr.KVStore.Get(ctx, path)
 	if err != nil {
-		logger.Errorw(ctx, "Failed to get from kv store", log.Fields{"path": path})
+		logger.Errorw("Failed to get from kv store", log.Fields{"path": path})
 		return err
 	} else if value == nil {
-		logger.Debug(ctx, "No onuinfo for path", log.Fields{"path": path})
+		logger.Debug("No onuinfo for path", log.Fields{"path": path})
 		return nil // returning nil as this could happen if there are no onus for the interface yet
 	}
 	if Val, err = kvstore.ToByte(value.Value); err != nil {
-		logger.Error(ctx, "Failed to convert to byte array")
+		logger.Error("Failed to convert to byte array")
 		return err
 	}
 
 	if err = json.Unmarshal(Val, &onuGemInfo); err != nil {
-		logger.Error(ctx, "Failed to unmarshall")
+		logger.Error("Failed to unmarshall")
 		return err
 	}
-	logger.Debugw(ctx, "found onuinfo from path", log.Fields{"path": path, "onuinfo": onuGemInfo})
+	logger.Debugw("found onuinfo from path", log.Fields{"path": path, "onuinfo": onuGemInfo})
 	return err
 }
 
@@ -1327,7 +1326,7 @@
 
 	path := fmt.Sprintf(ONU_GEM_INFO_PATH, PONRMgr.DeviceID, intfId)
 	if err := PONRMgr.KVStore.Delete(ctx, path); err != nil {
-		logger.Errorf(ctx, "Falied to remove resource %s", path)
+		logger.Errorf("Falied to remove resource %s", path)
 		return err
 	}
 	return nil
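
Note the split this revert restores in the resource manager: methods that read or write the KV store (GetResourceID, FreeResourceID, InitResourceIDPool and the update/remove helpers) keep their context for the backend calls, while pure helpers such as GetPath, GenerateNextID and ReleaseID drop it. A minimal sketch of that split; allocateOnuID is an illustrative wrapper and assumes mgr is an already initialised manager:

    package example

    import (
        "context"
        "errors"

        ponrmgr "github.com/opencord/voltha-lib-go/v3/pkg/ponresourcemanager"
    )

    // allocateOnuID reserves a single ONU ID on the given PON interface.
    func allocateOnuID(ctx context.Context, mgr *ponrmgr.PONResourceManager, intfID uint32) (uint32, error) {
        // GetPath only builds a KV key, so it no longer takes a context.
        if path := mgr.GetPath(intfID, ponrmgr.ONU_ID); path == "" {
            return 0, errors.New("no pool path for ONU_ID")
        }

        // GetResourceID reads and updates the pool in the KV store, so it keeps the context.
        ids, err := mgr.GetResourceID(ctx, intfID, ponrmgr.ONU_ID, 1)
        if err != nil {
            return 0, err
        }
        if len(ids) == 0 {
            return 0, errors.New("no ONU ID available")
        }
        return ids[0], nil
    }
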
diff --git a/pkg/ponresourcemanager/ponresourcemanager_test.go b/pkg/ponresourcemanager/ponresourcemanager_test.go
index 0eab36e..b91107c 100644
--- a/pkg/ponresourcemanager/ponresourcemanager_test.go
+++ b/pkg/ponresourcemanager/ponresourcemanager_test.go
@@ -39,7 +39,7 @@
 	resourceMap map[string]interface{}
 }
 
-func newMockKvClient(ctx context.Context) *MockResKVClient {
+func newMockKvClient() *MockResKVClient {
 	var mockResKVClient MockResKVClient
 	mockResKVClient.resourceMap = make(map[string]interface{})
 	return &mockResKVClient
@@ -52,16 +52,16 @@
 
 // Get mock function implementation for KVClient
 func (kvclient *MockResKVClient) Get(ctx context.Context, key string) (*kvstore.KVPair, error) {
-	logger.Debugw(ctx, "Get of MockKVClient called", log.Fields{"key": key})
+	logger.Debugw("Get of MockKVClient called", log.Fields{"key": key})
 	if key != "" {
 		if strings.Contains(key, RESERVED_GEMPORT_IDS_PATH) {
-			logger.Debug(ctx, "Getting Key:", RESERVED_GEMPORT_IDS_PATH)
+			logger.Debug("Getting Key:", RESERVED_GEMPORT_IDS_PATH)
 			reservedGemPorts := []uint32{RESERVED_GEM_PORT_ID}
 			str, _ := json.Marshal(reservedGemPorts)
 			return kvstore.NewKVPair(key, str, "mock", 3000, 1), nil
 		}
 		if strings.Contains(key, GEM_POOL_PATH) {
-			logger.Debug(ctx, "Getting Key:", GEM_POOL_PATH)
+			logger.Debug("Getting Key:", GEM_POOL_PATH)
 			resource := kvclient.resourceMap[key]
 			return kvstore.NewKVPair(key, resource, "mock", 3000, 1), nil
 		}
@@ -129,38 +129,38 @@
 }
 
 // CloseWatch mock function implementation for KVClient
-func (kvclient *MockResKVClient) CloseWatch(ctx context.Context, key string, ch chan *kvstore.Event) {
+func (kvclient *MockResKVClient) CloseWatch(key string, ch chan *kvstore.Event) {
 }
 
 // Close mock function implementation for KVClient
-func (kvclient *MockResKVClient) Close(ctx context.Context) {
+func (kvclient *MockResKVClient) Close() {
 }
 
 func TestExcludeReservedGemPortIdFromThePool(t *testing.T) {
-	ctx := context.Background()
-	PONRMgr, err := NewPONResourceManager(ctx, "gpon", "onu", "olt1",
+	PONRMgr, err := NewPONResourceManager("gpon", "onu", "olt1",
 		"etcd", "1:1")
 	if err != nil {
 		return
 	}
 	PONRMgr.KVStore = &db.Backend{
-		Client: newMockKvClient(ctx),
+		Client: newMockKvClient(),
 	}
 
 	PONRMgr.KVStoreForConfig = &db.Backend{
-		Client: newMockKvClient(ctx),
+		Client: newMockKvClient(),
 	}
 	// create a pool in the range of [1,16]
 	// and exclude id 5 from this pool
 	StartIndex := uint32(1)
 	EndIndex := uint32(16)
 
+	ctx := context.Background()
 	reservedGemPortIds, defined := PONRMgr.getReservedGemPortIdsFromKVStore(ctx)
 	if !defined {
 		return
 	}
 
-	FormatResult, err := PONRMgr.FormatResource(ctx, 1, StartIndex, EndIndex, reservedGemPortIds)
+	FormatResult, err := PONRMgr.FormatResource(1, StartIndex, EndIndex, reservedGemPortIds)
 	if err != nil {
 		t.Error("Failed to format resource", err)
 		return
@@ -181,7 +181,7 @@
 			return
 		}
 		// get a gem port id from the pool
-		nextID, err := PONRMgr.GenerateNextID(ctx, resource)
+		nextID, err := PONRMgr.GenerateNextID(resource)
 		if err != nil {
 			t.Error("Failed to get gem port id from the pool", err)
 			return
diff --git a/pkg/probe/common.go b/pkg/probe/common.go
index 14857ab..211419d 100644
--- a/pkg/probe/common.go
+++ b/pkg/probe/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.CLogger
+var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "probe"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "probe"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/probe/probe.go b/pkg/probe/probe.go
index 732d6df..e89d5bc 100644
--- a/pkg/probe/probe.go
+++ b/pkg/probe/probe.go
@@ -109,7 +109,7 @@
 }
 
 // RegisterService register one or more service names with the probe, status will be track against service name
-func (p *Probe) RegisterService(ctx context.Context, names ...string) {
+func (p *Probe) RegisterService(names ...string) {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 	if p.status == nil {
@@ -118,7 +118,7 @@
 	for _, name := range names {
 		if _, ok := p.status[name]; !ok {
 			p.status[name] = ServiceStatusUnknown
-			logger.Debugw(ctx, "probe-service-registered", log.Fields{"service-name": name})
+			logger.Debugw("probe-service-registered", log.Fields{"service-name": name})
 		}
 	}
 
@@ -136,7 +136,7 @@
 }
 
 // UpdateStatus utility function to send a service update to the probe
-func (p *Probe) UpdateStatus(ctx context.Context, name string, status ServiceStatus) {
+func (p *Probe) UpdateStatus(name string, status ServiceStatus) {
 	p.mutex.Lock()
 	defer p.mutex.Unlock()
 	if p.status == nil {
@@ -161,7 +161,7 @@
 	} else {
 		p.isHealthy = defaultHealthFunc(p.status)
 	}
-	logger.Debugw(ctx, "probe-service-status-updated",
+	logger.Debugw("probe-service-status-updated",
 		log.Fields{
 			"service-name": name,
 			"status":       status.String(),
@@ -204,7 +204,7 @@
 func UpdateStatusFromContext(ctx context.Context, name string, status ServiceStatus) {
 	p := GetProbeFromContext(ctx)
 	if p != nil {
-		p.UpdateStatus(ctx, name, status)
+		p.UpdateStatus(name, status)
 	}
 }
 
@@ -228,26 +228,25 @@
 	}
 }
 func (p *Probe) detailzFunc(w http.ResponseWriter, req *http.Request) {
-	ctx := context.Background()
 	p.mutex.RLock()
 	defer p.mutex.RUnlock()
 	w.Header().Set("Content-Type", "application/json")
 	if _, err := w.Write([]byte("{")); err != nil {
-		logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+		logger.Errorw("write-response", log.Fields{"error": err})
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
 	comma := ""
 	for c, s := range p.status {
 		if _, err := w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String()))); err != nil {
-			logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+			logger.Errorw("write-response", log.Fields{"error": err})
 			w.WriteHeader(http.StatusInternalServerError)
 			return
 		}
 		comma = ", "
 	}
 	if _, err := w.Write([]byte("}")); err != nil {
-		logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+		logger.Errorw("write-response", log.Fields{"error": err})
 		w.WriteHeader(http.StatusInternalServerError)
 		return
 	}
@@ -255,7 +254,7 @@
 }
 
 // ListenAndServe implements 3 HTTP endpoints on the given port for healthz, readz, and detailz. Returns only on error
-func (p *Probe) ListenAndServe(ctx context.Context, address string) {
+func (p *Probe) ListenAndServe(address string) {
 	mux := http.NewServeMux()
 
 	// Returns the result of the readyFunc calculation
@@ -270,7 +269,7 @@
 		Addr:    address,
 		Handler: mux,
 	}
-	logger.Fatal(ctx, s.ListenAndServe())
+	logger.Fatal(s.ListenAndServe())
 }
 
 func (p *Probe) IsReady() bool {
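With the revert in place, the probe package exposes context-free registration and status APIs again. A minimal consumer-side sketch of the reverted signatures follows (the package name, service names and listen address are illustrative only, not part of this patch):

	package main

	import (
		"context"

		"github.com/opencord/voltha-lib-go/v3/pkg/probe"
	)

	func main() {
		p := &probe.Probe{}

		// Register services and report status; no context argument after the revert.
		p.RegisterService("kafka", "kv-store")
		p.UpdateStatus("kafka", probe.ServiceStatusRunning)

		// Code that only carries a context can still update via the helper,
		// which looks the probe up under ProbeContextKey.
		ctx := context.WithValue(context.Background(), probe.ProbeContextKey, p)
		probe.UpdateStatusFromContext(ctx, "kv-store", probe.ServiceStatusRunning)

		// Serves /healthz, /readz and /detailz; blocks until the HTTP server fails.
		p.ListenAndServe("127.0.0.1:8080")
	}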
diff --git a/pkg/probe/probe_test.go b/pkg/probe/probe_test.go
index cae17e5..77cd6a8 100644
--- a/pkg/probe/probe_test.go
+++ b/pkg/probe/probe_test.go
@@ -72,7 +72,7 @@
 func TestRegisterOneService(t *testing.T) {
 	p := &Probe{}
 
-	p.RegisterService(context.Background(), "one")
+	p.RegisterService("one")
 
 	assert.Equal(t, 1, len(p.status), "wrong number of services")
 
@@ -83,7 +83,7 @@
 func TestRegisterMultipleServices(t *testing.T) {
 	p := &Probe{}
 
-	p.RegisterService(context.Background(), "one", "two", "three", "four")
+	p.RegisterService("one", "two", "three", "four")
 
 	assert.Equal(t, 4, len(p.status), "wrong number of services")
 
@@ -99,10 +99,10 @@
 
 func TestRegisterMultipleServicesIncremental(t *testing.T) {
 	p := &Probe{}
-	ctx := context.Background()
-	p.RegisterService(ctx, "one")
-	p.RegisterService(ctx, "two")
-	p.RegisterService(ctx, "three", "four")
+
+	p.RegisterService("one")
+	p.RegisterService("two")
+	p.RegisterService("three", "four")
 
 	assert.Equal(t, 4, len(p.status), "wrong number of services")
 
@@ -119,7 +119,7 @@
 func TestRegisterMultipleServicesDuplicates(t *testing.T) {
 	p := &Probe{}
 
-	p.RegisterService(context.Background(), "one", "one", "one", "two")
+	p.RegisterService("one", "one", "one", "two")
 
 	assert.Equal(t, 2, len(p.status), "wrong number of services")
 
@@ -131,10 +131,10 @@
 
 func TestRegisterMultipleServicesDuplicatesIncremental(t *testing.T) {
 	p := &Probe{}
-	ctx := context.Background()
-	p.RegisterService(ctx, "one")
-	p.RegisterService(ctx, "one")
-	p.RegisterService(ctx, "one", "two")
+
+	p.RegisterService("one")
+	p.RegisterService("one")
+	p.RegisterService("one", "two")
 
 	assert.Equal(t, 2, len(p.status), "wrong number of services")
 
@@ -146,9 +146,9 @@
 
 func TestUpdateStatus(t *testing.T) {
 	p := &Probe{}
-	ctx := context.Background()
-	p.RegisterService(ctx, "one", "two")
-	p.UpdateStatus(ctx, "one", ServiceStatusRunning)
+
+	p.RegisterService("one", "two")
+	p.UpdateStatus("one", ServiceStatusRunning)
 
 	assert.Equal(t, ServiceStatusRunning, p.status["one"], "status not set")
 	assert.Equal(t, ServiceStatusUnknown, p.status["two"], "status set")
@@ -156,14 +156,14 @@
 
 func TestRegisterOverwriteStatus(t *testing.T) {
 	p := &Probe{}
-	ctx := context.Background()
-	p.RegisterService(ctx, "one", "two")
-	p.UpdateStatus(ctx, "one", ServiceStatusRunning)
+
+	p.RegisterService("one", "two")
+	p.UpdateStatus("one", ServiceStatusRunning)
 
 	assert.Equal(t, ServiceStatusRunning, p.status["one"], "status not set")
 	assert.Equal(t, ServiceStatusUnknown, p.status["two"], "status set")
 
-	p.RegisterService(ctx, "one", "three")
+	p.RegisterService("one", "three")
 	assert.Equal(t, 3, len(p.status), "wrong number of services")
 	assert.Equal(t, ServiceStatusRunning, p.status["one"], "status overridden")
 	assert.Equal(t, ServiceStatusUnknown, p.status["two"], "status set")
@@ -172,7 +172,7 @@
 
 func TestDetailzWithServies(t *testing.T) {
 	p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysTrue)
-	p.RegisterService(context.Background(), "one", "two")
+	p.RegisterService("one", "two")
 
 	req := httptest.NewRequest("GET", "http://example.com/detailz", nil)
 	w := httptest.NewRecorder()
@@ -201,7 +201,7 @@
 
 func TestReadzWithServicesWithTrue(t *testing.T) {
 	p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysTrue)
-	p.RegisterService(context.Background(), "one", "two")
+	p.RegisterService("one", "two")
 
 	req := httptest.NewRequest("GET", "http://example.com/readz", nil)
 	w := httptest.NewRecorder()
@@ -212,7 +212,7 @@
 
 func TestReadzWithServicesWithDefault(t *testing.T) {
 	p := &Probe{}
-	p.RegisterService(context.Background(), "one", "two")
+	p.RegisterService("one", "two")
 
 	req := httptest.NewRequest("GET", "http://example.com/readz", nil)
 	w := httptest.NewRecorder()
@@ -233,10 +233,9 @@
 
 func TestReadzWithServicesDefault(t *testing.T) {
 	p := &Probe{}
-	ctx := context.Background()
-	p.RegisterService(ctx, "one", "two")
-	p.UpdateStatus(ctx, "one", ServiceStatusRunning)
-	p.UpdateStatus(ctx, "two", ServiceStatusRunning)
+	p.RegisterService("one", "two")
+	p.UpdateStatus("one", ServiceStatusRunning)
+	p.UpdateStatus("two", ServiceStatusRunning)
 
 	req := httptest.NewRequest("GET", "http://example.com/readz", nil)
 	w := httptest.NewRecorder()
@@ -247,8 +246,8 @@
 
 func TestReadzWithServicesDefaultOne(t *testing.T) {
 	p := &Probe{}
-	p.RegisterService(context.Background(), "one", "two")
-	p.UpdateStatus(context.Background(), "one", ServiceStatusRunning)
+	p.RegisterService("one", "two")
+	p.UpdateStatus("one", ServiceStatusRunning)
 
 	req := httptest.NewRequest("GET", "http://example.com/readz", nil)
 	w := httptest.NewRecorder()
@@ -269,7 +268,7 @@
 
 func TestHealthzWithServicesWithTrue(t *testing.T) {
 	p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysTrue)
-	p.RegisterService(context.Background(), "one", "two")
+	p.RegisterService("one", "two")
 
 	req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
 	w := httptest.NewRecorder()
@@ -280,7 +279,7 @@
 
 func TestHealthzWithServicesWithDefault(t *testing.T) {
 	p := &Probe{}
-	p.RegisterService(context.Background(), "one", "two")
+	p.RegisterService("one", "two")
 
 	req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
 	w := httptest.NewRecorder()
@@ -301,9 +300,9 @@
 
 func TestHealthzWithServicesDefault(t *testing.T) {
 	p := &Probe{}
-	p.RegisterService(context.Background(), "one", "two")
-	p.UpdateStatus(context.Background(), "one", ServiceStatusRunning)
-	p.UpdateStatus(context.Background(), "two", ServiceStatusRunning)
+	p.RegisterService("one", "two")
+	p.UpdateStatus("one", ServiceStatusRunning)
+	p.UpdateStatus("two", ServiceStatusRunning)
 
 	req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
 	w := httptest.NewRecorder()
@@ -314,8 +313,8 @@
 
 func TestHealthzWithServicesDefaultFailed(t *testing.T) {
 	p := &Probe{}
-	p.RegisterService(context.Background(), "one", "two")
-	p.UpdateStatus(context.Background(), "one", ServiceStatusFailed)
+	p.RegisterService("one", "two")
+	p.UpdateStatus("one", ServiceStatusFailed)
 
 	req := httptest.NewRequest("GET", "http://example.com/healthz", nil)
 	w := httptest.NewRecorder()
@@ -333,7 +332,7 @@
 
 func TestGetProbeFromContext(t *testing.T) {
 	p := &Probe{}
-	p.RegisterService(context.Background(), "one")
+	p.RegisterService("one")
 	ctx := context.WithValue(context.Background(), ProbeContextKey, p)
 	pc := GetProbeFromContext(ctx)
 	assert.Equal(t, p, pc, "Probe from context was not identical to original probe")
@@ -348,7 +347,7 @@
 
 func TestUpdateStatusFromContext(t *testing.T) {
 	p := &Probe{}
-	p.RegisterService(context.Background(), "one")
+	p.RegisterService("one")
 	ctx := context.WithValue(context.Background(), ProbeContextKey, p)
 	UpdateStatusFromContext(ctx, "one", ServiceStatusRunning)
 
@@ -360,7 +359,7 @@
 
 func TestUpdateStatusFromNilContext(t *testing.T) {
 	p := &Probe{}
-	p.RegisterService(context.Background(), "one")
+	p.RegisterService("one")
 	// nolint: staticcheck
 	UpdateStatusFromContext(nil, "one", ServiceStatusRunning)
 
@@ -373,7 +372,7 @@
 
 func TestUpdateStatusFromContextWithoutProbe(t *testing.T) {
 	p := &Probe{}
-	p.RegisterService(context.Background(), "one")
+	p.RegisterService("one")
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer cancel()
 	UpdateStatusFromContext(ctx, "one", ServiceStatusRunning)
@@ -387,7 +386,7 @@
 
 func TestUpdateStatusFromContextWrongType(t *testing.T) {
 	p := &Probe{}
-	p.RegisterService(context.Background(), "one")
+	p.RegisterService("one")
 	ctx := context.WithValue(context.Background(), ProbeContextKey, "Teapot")
 	UpdateStatusFromContext(ctx, "one", ServiceStatusRunning)
 
@@ -400,7 +399,7 @@
 func TestUpdateStatusNoRegistered(t *testing.T) {
 	p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysFalse)
 
-	p.UpdateStatus(context.Background(), "one", ServiceStatusRunning)
+	p.UpdateStatus("one", ServiceStatusRunning)
 	assert.Equal(t, 1, len(p.status), "wrong number of services")
 	_, ok := p.status["one"]
 	assert.True(t, ok, "unable to find registered service")
@@ -410,7 +409,7 @@
 func TestIsReadyTrue(t *testing.T) {
 	p := (&Probe{}).WithReadyFunc(AlwaysTrue).WithHealthFunc(AlwaysFalse)
 
-	p.RegisterService(context.Background(), "SomeService")
+	p.RegisterService("SomeService")
 
 	assert.True(t, p.IsReady(), "IsReady should have been true")
 }
@@ -418,7 +417,7 @@
 func TestIsReadyFalse(t *testing.T) {
 	p := (&Probe{}).WithReadyFunc(AlwaysFalse).WithHealthFunc(AlwaysFalse)
 
-	p.RegisterService(context.Background(), "SomeService")
+	p.RegisterService("SomeService")
 
 	assert.False(t, p.IsReady(), "IsReady should have been false")
 }
@@ -426,8 +425,8 @@
 func TestGetStatus(t *testing.T) {
 	p := &Probe{}
 
-	p.RegisterService(context.Background(), "one", "two")
-	p.UpdateStatus(context.Background(), "one", ServiceStatusRunning)
+	p.RegisterService("one", "two")
+	p.UpdateStatus("one", ServiceStatusRunning)
 
 	ss := p.GetStatus("one")
 	assert.Equal(t, ServiceStatusRunning, ss, "Service status should have been ServiceStatusRunning")
@@ -436,7 +435,7 @@
 func TestGetStatusMissingService(t *testing.T) {
 	p := &Probe{}
 
-	p.RegisterService(context.Background(), "one", "two")
+	p.RegisterService("one", "two")
 
 	ss := p.GetStatus("three")
 	assert.Equal(t, ServiceStatusUnknown, ss, "Service status should have been ServiceStatusUnknown")
diff --git a/pkg/techprofile/common.go b/pkg/techprofile/common.go
index e7cd798..42818f1 100644
--- a/pkg/techprofile/common.go
+++ b/pkg/techprofile/common.go
@@ -19,12 +19,12 @@
 	"github.com/opencord/voltha-lib-go/v3/pkg/log"
 )
 
-var logger log.CLogger
+var logger log.Logger
 
 func init() {
 	// Setup this package so that it's log level can be modified at run time
 	var err error
-	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "techprofile"})
+	logger, err = log.AddPackage(log.JSON, log.ErrorLevel, log.Fields{"pkg": "techprofile"})
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/techprofile/tech_profile.go b/pkg/techprofile/tech_profile.go
index d876588..afe5c09 100644
--- a/pkg/techprofile/tech_profile.go
+++ b/pkg/techprofile/tech_profile.go
@@ -242,10 +242,10 @@
 	DownstreamGemPortAttributeList []iGemPortAttribute `json:"downstream_gem_port_attribute_list"`
 }
 
-func (t *TechProfileMgr) SetKVClient(ctx context.Context) *db.Backend {
-	kvClient, err := newKVClient(ctx, t.config.KVStoreType, t.config.KVStoreAddress, t.config.KVStoreTimeout)
+func (t *TechProfileMgr) SetKVClient() *db.Backend {
+	kvClient, err := newKVClient(t.config.KVStoreType, t.config.KVStoreAddress, t.config.KVStoreTimeout)
 	if err != nil {
-		logger.Errorw(ctx, "failed-to-create-kv-client",
+		logger.Errorw("failed-to-create-kv-client",
 			log.Fields{
 				"type": t.config.KVStoreType, "address": t.config.KVStoreAddress,
 				"timeout": t.config.KVStoreTimeout, "prefix": t.config.TPKVPathPrefix,
@@ -267,34 +267,34 @@
 	*/
 }
 
-func newKVClient(ctx context.Context, storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
+func newKVClient(storeType string, address string, timeout time.Duration) (kvstore.Client, error) {
 
-	logger.Infow(ctx, "kv-store", log.Fields{"storeType": storeType, "address": address})
+	logger.Infow("kv-store", log.Fields{"storeType": storeType, "address": address})
 	switch storeType {
 	case "consul":
-		return kvstore.NewConsulClient(ctx, address, timeout)
+		return kvstore.NewConsulClient(address, timeout)
 	case "etcd":
-		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
+		return kvstore.NewEtcdClient(address, timeout, log.WarnLevel)
 	}
 	return nil, errors.New("unsupported-kv-store")
 }
 
-func NewTechProfile(ctx context.Context, resourceMgr iPonResourceMgr, KVStoreType string, KVStoreAddress string) (*TechProfileMgr, error) {
+func NewTechProfile(resourceMgr iPonResourceMgr, KVStoreType string, KVStoreAddress string) (*TechProfileMgr, error) {
 	var techprofileObj TechProfileMgr
-	logger.Debug(ctx, "Initializing techprofile Manager")
+	logger.Debug("Initializing techprofile Manager")
 	techprofileObj.config = NewTechProfileFlags(KVStoreType, KVStoreAddress)
-	techprofileObj.config.KVBackend = techprofileObj.SetKVClient(ctx)
+	techprofileObj.config.KVBackend = techprofileObj.SetKVClient()
 	if techprofileObj.config.KVBackend == nil {
-		logger.Error(ctx, "Failed to initialize KV backend\n")
+		logger.Error("Failed to initialize KV backend\n")
 		return nil, errors.New("KV backend init failed")
 	}
 	techprofileObj.resourceMgr = resourceMgr
-	logger.Debug(ctx, "Initializing techprofile object instance success")
+	logger.Debug("Initializing techprofile object instance success")
 	return &techprofileObj, nil
 }
 
-func (t *TechProfileMgr) GetTechProfileInstanceKVPath(ctx context.Context, techProfiletblID uint32, uniPortName string) string {
-	logger.Debugw(ctx, "get-tp-instance-kv-path", log.Fields{
+func (t *TechProfileMgr) GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string {
+	logger.Debugw("get-tp-instance-kv-path", log.Fields{
 		"uniPortName": uniPortName,
 		"tpId":        techProfiletblID,
 	})
@@ -307,16 +307,16 @@
 	var err error
 	var kvResult *kvstore.KVPair
 
-	logger.Infow(ctx, "get-tp-instance-form-kv-store", log.Fields{"path": path, "tpid": techProfiletblID})
+	logger.Infow("get-tp-instance-form-kv-store", log.Fields{"path": path, "tpid": techProfiletblID})
 
 	kvResult, _ = t.config.KVBackend.Get(ctx, path)
 	if kvResult == nil {
-		logger.Infow(ctx, "tp-instance-not-found-on-kv", log.Fields{"key": path})
+		logger.Infow("tp-instance-not-found-on-kv", log.Fields{"key": path})
 		return nil, nil
 	} else {
 		if value, err := kvstore.ToByte(kvResult.Value); err == nil {
 			if err = json.Unmarshal(value, resPtr); err != nil {
-				logger.Errorw(ctx, "error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
+				logger.Errorw("error-unmarshal-kv-result", log.Fields{"key": path, "value": value})
 				return nil, errors.New("error-unmarshal-kv-result")
 			} else {
 				return resPtr, nil
@@ -327,36 +327,36 @@
 }
 
 func (t *TechProfileMgr) addTechProfInstanceToKVStore(ctx context.Context, techProfiletblID uint32, uniPortName string, tpInstance *TechProfile) error {
-	path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
-	logger.Debugw(ctx, "Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
+	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
+	logger.Debugw("Adding techprof instance to kvstore", log.Fields{"key": path, "tpinstance": tpInstance})
 	tpInstanceJson, err := json.Marshal(*tpInstance)
 	if err == nil {
 		// Backend will convert JSON byte array into string format
-		logger.Debugw(ctx, "Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
+		logger.Debugw("Storing tech profile instance to KV Store", log.Fields{"key": path, "val": tpInstanceJson})
 		err = t.config.KVBackend.Put(ctx, path, tpInstanceJson)
 	} else {
-		logger.Errorw(ctx, "Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
+		logger.Errorw("Error in marshaling into Json format", log.Fields{"key": path, "tpinstance": tpInstance})
 	}
 	return err
 }
 func (t *TechProfileMgr) getTPFromKVStore(ctx context.Context, techProfiletblID uint32) *DefaultTechProfile {
 	var kvtechprofile DefaultTechProfile
 	key := fmt.Sprintf(t.config.TPFileKVPath, t.resourceMgr.GetTechnology(), techProfiletblID)
-	logger.Debugw(ctx, "Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
+	logger.Debugw("Getting techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "Key": key})
 	kvresult, err := t.config.KVBackend.Get(ctx, key)
 	if err != nil {
-		logger.Errorw(ctx, "Error while fetching value from KV store", log.Fields{"key": key})
+		logger.Errorw("Error while fetching value from KV store", log.Fields{"key": key})
 		return nil
 	}
 	if kvresult != nil {
 		/* Backend will return Value in string format,needs to be converted to []byte before unmarshal*/
 		if value, err := kvstore.ToByte(kvresult.Value); err == nil {
 			if err = json.Unmarshal(value, &kvtechprofile); err != nil {
-				logger.Errorw(ctx, "Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
+				logger.Errorw("Error unmarshaling techprofile fetched from KV store", log.Fields{"techProfiletblID": techProfiletblID, "error": err, "techprofile_json": value})
 				return nil
 			}
 
-			logger.Debugw(ctx, "Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
+			logger.Debugw("Success fetched techprofile from KV store", log.Fields{"techProfiletblID": techProfiletblID, "value": kvtechprofile})
 			return &kvtechprofile
 		}
 	}
@@ -365,58 +365,58 @@
 
 func (t *TechProfileMgr) CreateTechProfInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfId uint32) (*TechProfile, error) {
 	var tpInstance *TechProfile
-	logger.Infow(ctx, "creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
+	logger.Infow("creating-tp-instance", log.Fields{"tableid": techProfiletblID, "uni": uniPortName, "intId": intfId})
 
 	// Make sure the uniPortName is as per format pon-{[0-9]+}/onu-{[0-9]+}/uni-{[0-9]+}
 	if !uniPortNameFormat.Match([]byte(uniPortName)) {
-		logger.Errorw(ctx, "uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
+		logger.Errorw("uni-port-name-not-confirming-to-format", log.Fields{"uniPortName": uniPortName})
 		return nil, errors.New("uni-port-name-not-confirming-to-format")
 	}
 
 	tp := t.getTPFromKVStore(ctx, techProfiletblID)
 	if tp != nil {
-		if err := t.validateInstanceControlAttr(ctx, tp.InstanceCtrl); err != nil {
-			logger.Error(ctx, "invalid-instance-ctrl-attr--using-default-tp")
-			tp = t.getDefaultTechProfile(ctx)
+		if err := t.validateInstanceControlAttr(tp.InstanceCtrl); err != nil {
+			logger.Error("invalid-instance-ctrl-attr--using-default-tp")
+			tp = t.getDefaultTechProfile()
 		} else {
-			logger.Infow(ctx, "using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
+			logger.Infow("using-specified-tp-from-kv-store", log.Fields{"tpid": techProfiletblID})
 		}
 	} else {
-		logger.Info(ctx, "tp-not-found-on-kv--creating-default-tp")
-		tp = t.getDefaultTechProfile(ctx)
+		logger.Info("tp-not-found-on-kv--creating-default-tp")
+		tp = t.getDefaultTechProfile()
 	}
-	tpInstancePath := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
+	tpInstancePath := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
 	if tpInstance = t.allocateTPInstance(ctx, uniPortName, tp, intfId, tpInstancePath); tpInstance == nil {
-		logger.Error(ctx, "tp-intance-allocation-failed")
+		logger.Error("tp-intance-allocation-failed")
 		return nil, errors.New("tp-intance-allocation-failed")
 	}
 	if err := t.addTechProfInstanceToKVStore(ctx, techProfiletblID, uniPortName, tpInstance); err != nil {
-		logger.Errorw(ctx, "error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
+		logger.Errorw("error-adding-tp-to-kv-store", log.Fields{"tableid": techProfiletblID, "uni": uniPortName})
 		return nil, errors.New("error-adding-tp-to-kv-store")
 	}
-	logger.Infow(ctx, "tp-added-to-kv-store-successfully",
+	logger.Infow("tp-added-to-kv-store-successfully",
 		log.Fields{"tpid": techProfiletblID, "uni": uniPortName, "intfId": intfId})
 	return tpInstance, nil
 }
 
 func (t *TechProfileMgr) DeleteTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string) error {
-	path := t.GetTechProfileInstanceKVPath(ctx, techProfiletblID, uniPortName)
+	path := t.GetTechProfileInstanceKVPath(techProfiletblID, uniPortName)
 	return t.config.KVBackend.Delete(ctx, path)
 }
 
-func (t *TechProfileMgr) validateInstanceControlAttr(ctx context.Context, instCtl InstanceControl) error {
+func (t *TechProfileMgr) validateInstanceControlAttr(instCtl InstanceControl) error {
 	if instCtl.Onu != "single-instance" && instCtl.Onu != "multi-instance" {
-		logger.Errorw(ctx, "invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
+		logger.Errorw("invalid-onu-instance-control-attribute", log.Fields{"onu-inst": instCtl.Onu})
 		return errors.New("invalid-onu-instance-ctl-attr")
 	}
 
 	if instCtl.Uni != "single-instance" && instCtl.Uni != "multi-instance" {
-		logger.Errorw(ctx, "invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
+		logger.Errorw("invalid-uni-instance-control-attribute", log.Fields{"uni-inst": instCtl.Uni})
 		return errors.New("invalid-uni-instance-ctl-attr")
 	}
 
 	if instCtl.Uni == "multi-instance" {
-		logger.Error(ctx, "uni-multi-instance-tp-not-supported")
+		logger.Error("uni-multi-instance-tp-not-supported")
 		return errors.New("uni-multi-instance-tp-not-supported")
 	}
 
@@ -433,22 +433,22 @@
 	var gemPorts []uint32
 	var err error
 
-	logger.Infow(ctx, "Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
+	logger.Infow("Allocating TechProfileMgr instance from techprofile template", log.Fields{"uniPortName": uniPortName, "intfId": intfId, "numGem": tp.NumGemPorts})
 
 	if tp.InstanceCtrl.Onu == "multi-instance" {
 		if tcontIDs, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
-			logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			logger.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 			return nil
 		}
 	} else { // "single-instance"
 		if tpInst, err := t.getSingleInstanceTp(ctx, tpInstPath); err != nil {
-			logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+			logger.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 			return nil
 		} else if tpInst == nil {
 			// No "single-instance" tp found on one any uni port for the given TP ID
 			// Allocate a new TcontID or AllocID
 			if tcontIDs, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeAllocID(), 1); err != nil {
-				logger.Errorw(ctx, "Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
+				logger.Errorw("Error getting alloc id from rsrcrMgr", log.Fields{"intfId": intfId})
 				return nil
 			}
 		} else {
@@ -456,12 +456,12 @@
 			tcontIDs = append(tcontIDs, tpInst.UsScheduler.AllocID)
 		}
 	}
-	logger.Debugw(ctx, "Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
+	logger.Debugw("Num GEM ports in TP:", log.Fields{"NumGemPorts": tp.NumGemPorts})
 	if gemPorts, err = t.resourceMgr.GetResourceID(ctx, intfId, t.resourceMgr.GetResourceTypeGemPortID(), tp.NumGemPorts); err != nil {
-		logger.Errorw(ctx, "Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
+		logger.Errorw("Error getting gemport ids from rsrcrMgr", log.Fields{"intfId": intfId, "numGemports": tp.NumGemPorts})
 		return nil
 	}
-	logger.Infow(ctx, "Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
+	logger.Infow("Allocated tconts and GEM ports successfully", log.Fields{"tconts": tcontIDs, "gemports": gemPorts})
 	for index := 0; index < int(tp.NumGemPorts); index++ {
 		usGemPortAttributeList = append(usGemPortAttributeList,
 			iGemPortAttribute{GemportID: gemPorts[index],
@@ -475,7 +475,7 @@
 				DiscardConfig:    tp.UpstreamGemPortAttributeList[index].DiscardConfig})
 	}
 
-	logger.Info(ctx, "length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
+	logger.Info("length of DownstreamGemPortAttributeList", len(tp.DownstreamGemPortAttributeList))
 	//put multicast and unicast downstream GEM port attributes in different lists first
 	for index := 0; index < int(len(tp.DownstreamGemPortAttributeList)); index++ {
 		if isMulticastGem(tp.DownstreamGemPortAttributeList[index].IsMulticast) {
@@ -564,10 +564,10 @@
 	for keyPath, kvPair := range kvPairs {
 		if value, err := kvstore.ToByte(kvPair.Value); err == nil {
 			if err = json.Unmarshal(value, &tpInst); err != nil {
-				logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
+				logger.Errorw("error-unmarshal-kv-pair", log.Fields{"keyPath": keyPath, "value": value})
 				return nil, errors.New("error-unmarshal-kv-pair")
 			} else {
-				logger.Debugw(ctx, "found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
+				logger.Debugw("found-valid-tp-instance-on-another-uni", log.Fields{"keyPath": keyPath})
 				return &tpInst, nil
 			}
 		}
@@ -575,13 +575,13 @@
 	return nil, nil
 }
 
-func (t *TechProfileMgr) getDefaultTechProfile(ctx context.Context) *DefaultTechProfile {
+func (t *TechProfileMgr) getDefaultTechProfile() *DefaultTechProfile {
 
 	var usGemPortAttributeList []GemPortAttribute
 	var dsGemPortAttributeList []GemPortAttribute
 
 	for _, pbit := range t.config.DefaultPbits {
-		logger.Debugw(ctx, "Creating GEM port", log.Fields{"pbit": pbit})
+		logger.Debugw("Creating GEM port", log.Fields{"pbit": pbit})
 		usGemPortAttributeList = append(usGemPortAttributeList,
 			GemPortAttribute{
 				MaxQueueSize:     defaultMaxQueueSize,
@@ -638,7 +638,7 @@
 		DownstreamGemPortAttributeList: dsGemPortAttributeList}
 }
 
-func (t *TechProfileMgr) GetprotoBufParamValue(ctx context.Context, paramType string, paramKey string) int32 {
+func (t *TechProfileMgr) GetprotoBufParamValue(paramType string, paramKey string) int32 {
 	var result int32 = -1
 
 	if paramType == "direction" {
@@ -656,7 +656,7 @@
 	} else if paramType == "sched_policy" {
 		for key, val := range tp_pb.SchedulingPolicy_value {
 			if key == paramKey {
-				logger.Debugw(ctx, "Got value in proto", log.Fields{"key": key, "value": val})
+				logger.Debugw("Got value in proto", log.Fields{"key": key, "value": val})
 				result = val
 			}
 		}
@@ -667,29 +667,29 @@
 			}
 		}
 	} else {
-		logger.Error(ctx, "Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
+		logger.Error("Could not find proto parameter", log.Fields{"paramType": paramType, "key": paramKey})
 		return -1
 	}
-	logger.Debugw(ctx, "Got value in proto", log.Fields{"key": paramKey, "value": result})
+	logger.Debugw("Got value in proto", log.Fields{"key": paramKey, "value": result})
 	return result
 }
 
-func (t *TechProfileMgr) GetUsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
-	dir := tp_pb.Direction(t.GetprotoBufParamValue(ctx, "direction", tpInstance.UsScheduler.Direction))
+func (t *TechProfileMgr) GetUsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
+	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.UsScheduler.Direction))
 	if dir == -1 {
-		logger.Errorf(ctx, "Error in getting proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
+		logger.Errorf("Error in getting proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
 		return nil, fmt.Errorf("unable to get proto id for direction %s for upstream scheduler", tpInstance.UsScheduler.Direction)
 	}
 
-	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue(ctx, "additional_bw", tpInstance.UsScheduler.AdditionalBw))
+	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.UsScheduler.AdditionalBw))
 	if bw == -1 {
-		logger.Errorf(ctx, "Error in getting proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
+		logger.Errorf("Error in getting proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
 		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for upstream scheduler", tpInstance.UsScheduler.AdditionalBw)
 	}
 
-	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue(ctx, "sched_policy", tpInstance.UsScheduler.QSchedPolicy))
+	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.UsScheduler.QSchedPolicy))
 	if policy == -1 {
-		logger.Errorf(ctx, "Error in getting proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
+		logger.Errorf("Error in getting proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
 		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for upstream scheduler", tpInstance.UsScheduler.QSchedPolicy)
 	}
 
@@ -701,23 +701,23 @@
 		SchedPolicy:  policy}, nil
 }
 
-func (t *TechProfileMgr) GetDsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
+func (t *TechProfileMgr) GetDsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error) {
 
-	dir := tp_pb.Direction(t.GetprotoBufParamValue(ctx, "direction", tpInstance.DsScheduler.Direction))
+	dir := tp_pb.Direction(t.GetprotoBufParamValue("direction", tpInstance.DsScheduler.Direction))
 	if dir == -1 {
-		logger.Errorf(ctx, "Error in getting proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
+		logger.Errorf("Error in getting proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
 		return nil, fmt.Errorf("unable to get proto id for direction %s for downstream scheduler", tpInstance.DsScheduler.Direction)
 	}
 
-	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue(ctx, "additional_bw", tpInstance.DsScheduler.AdditionalBw))
+	bw := tp_pb.AdditionalBW(t.GetprotoBufParamValue("additional_bw", tpInstance.DsScheduler.AdditionalBw))
 	if bw == -1 {
-		logger.Errorf(ctx, "Error in getting proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
+		logger.Errorf("Error in getting proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
 		return nil, fmt.Errorf("unable to get proto id for bandwidth %s for downstream scheduler", tpInstance.DsScheduler.AdditionalBw)
 	}
 
-	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue(ctx, "sched_policy", tpInstance.DsScheduler.QSchedPolicy))
+	policy := tp_pb.SchedulingPolicy(t.GetprotoBufParamValue("sched_policy", tpInstance.DsScheduler.QSchedPolicy))
 	if policy == -1 {
-		logger.Errorf(ctx, "Error in getting proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
+		logger.Errorf("Error in getting proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
 		return nil, fmt.Errorf("unable to get proto id for scheduling policy %s for downstream scheduler", tpInstance.DsScheduler.QSchedPolicy)
 	}
 
@@ -741,7 +741,7 @@
 	return tSched
 }
 
-func (tpm *TechProfileMgr) GetTrafficQueues(ctx context.Context, tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
+func (tpm *TechProfileMgr) GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error) {
 
 	var encryp bool
 	if Dir == tp_pb.Direction_UPSTREAM {
@@ -755,20 +755,20 @@
 				encryp = false
 			}
 
-			schedPolicy := tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
+			schedPolicy := tpm.GetprotoBufParamValue("sched_policy", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
 			if schedPolicy == -1 {
-				logger.Errorf(ctx, "Error in getting Proto Id for scheduling policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
+				logger.Errorf("Error in getting Proto Id for scheduling policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
 				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.UpstreamGemPortAttributeList[Count].SchedulingPolicy)
 			}
 
-			discardPolicy := tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
+			discardPolicy := tpm.GetprotoBufParamValue("discard_policy", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
 			if discardPolicy == -1 {
-				logger.Errorf(ctx, "Error in getting Proto Id for discard policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy, Count)
+				logger.Errorf("Error in getting Proto Id for discard policy %s for Upstream Gem Port %d", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy, Count)
 				return nil, fmt.Errorf("upstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.UpstreamGemPortAttributeList[Count].DiscardPolicy)
 			}
 
 			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
-				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.UsScheduler.Direction)),
+				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.UsScheduler.Direction)),
 				GemportId:     tp.UpstreamGemPortAttributeList[Count].GemportID,
 				PbitMap:       tp.UpstreamGemPortAttributeList[Count].PbitMap,
 				AesEncryption: encryp,
@@ -778,7 +778,7 @@
 				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
 			})
 		}
-		logger.Debugw(ctx, "Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		logger.Debugw("Upstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
 		return GemPorts, nil
 	} else if Dir == tp_pb.Direction_DOWNSTREAM {
 		//downstream GEM ports
@@ -795,20 +795,20 @@
 				encryp = false
 			}
 
-			schedPolicy := tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
+			schedPolicy := tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
 			if schedPolicy == -1 {
-				logger.Errorf(ctx, "Error in getting Proto Id for scheduling policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
+				logger.Errorf("Error in getting Proto Id for scheduling policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy, Count)
 				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized scheduling policy %s", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)
 			}
 
-			discardPolicy := tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
+			discardPolicy := tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
 			if discardPolicy == -1 {
-				logger.Errorf(ctx, "Error in getting Proto Id for discard policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy, Count)
+				logger.Errorf("Error in getting Proto Id for discard policy %s for Downstream Gem Port %d", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy, Count)
 				return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unrecognized discard policy %s", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)
 			}
 
 			GemPorts = append(GemPorts, &tp_pb.TrafficQueue{
-				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.DsScheduler.Direction)),
+				Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
 				GemportId:     tp.DownstreamGemPortAttributeList[Count].GemportID,
 				PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
 				AesEncryption: encryp,
@@ -818,11 +818,11 @@
 				DiscardPolicy: tp_pb.DiscardPolicy(discardPolicy),
 			})
 		}
-		logger.Debugw(ctx, "Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
+		logger.Debugw("Downstream Traffic queue list ", log.Fields{"queuelist": GemPorts})
 		return GemPorts, nil
 	}
 
-	logger.Errorf(ctx, "Unsupported direction %s used for generating Traffic Queue list", Dir)
+	logger.Errorf("Unsupported direction %s used for generating Traffic Queue list", Dir)
 	return nil, fmt.Errorf("downstream gem port traffic queue creation failed due to unsupported direction %s", Dir)
 }
 
@@ -832,7 +832,7 @@
 		(isMulticastAttrValue == "True" || isMulticastAttrValue == "true" || isMulticastAttrValue == "TRUE")
 }
 
-func (tpm *TechProfileMgr) GetMulticastTrafficQueues(ctx context.Context, tp *TechProfile) []*tp_pb.TrafficQueue {
+func (tpm *TechProfileMgr) GetMulticastTrafficQueues(tp *TechProfile) []*tp_pb.TrafficQueue {
 	var encryp bool
 	NumGemPorts := len(tp.DownstreamGemPortAttributeList)
 	mcastTrafficQueues := make([]*tp_pb.TrafficQueue, 0)
@@ -846,29 +846,29 @@
 			encryp = false
 		}
 		mcastTrafficQueues = append(mcastTrafficQueues, &tp_pb.TrafficQueue{
-			Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue(ctx, "direction", tp.DsScheduler.Direction)),
+			Direction:     tp_pb.Direction(tpm.GetprotoBufParamValue("direction", tp.DsScheduler.Direction)),
 			GemportId:     tp.DownstreamGemPortAttributeList[Count].McastGemID,
 			PbitMap:       tp.DownstreamGemPortAttributeList[Count].PbitMap,
 			AesEncryption: encryp,
-			SchedPolicy:   tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue(ctx, "sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
+			SchedPolicy:   tp_pb.SchedulingPolicy(tpm.GetprotoBufParamValue("sched_policy", tp.DownstreamGemPortAttributeList[Count].SchedulingPolicy)),
 			Priority:      tp.DownstreamGemPortAttributeList[Count].PriorityQueue,
 			Weight:        tp.DownstreamGemPortAttributeList[Count].Weight,
-			DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue(ctx, "discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
+			DiscardPolicy: tp_pb.DiscardPolicy(tpm.GetprotoBufParamValue("discard_policy", tp.DownstreamGemPortAttributeList[Count].DiscardPolicy)),
 		})
 	}
-	logger.Debugw(ctx, "Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
+	logger.Debugw("Downstream Multicast Traffic queue list ", log.Fields{"queuelist": mcastTrafficQueues})
 	return mcastTrafficQueues
 }
 
-func (tpm *TechProfileMgr) GetUsTrafficScheduler(ctx context.Context, tp *TechProfile) *tp_pb.TrafficScheduler {
-	UsScheduler, _ := tpm.GetUsScheduler(ctx, tp)
+func (tpm *TechProfileMgr) GetUsTrafficScheduler(tp *TechProfile) *tp_pb.TrafficScheduler {
+	UsScheduler, _ := tpm.GetUsScheduler(tp)
 
 	return &tp_pb.TrafficScheduler{Direction: UsScheduler.Direction,
 		AllocId:   tp.UsScheduler.AllocID,
 		Scheduler: UsScheduler}
 }
 
-func (t *TechProfileMgr) GetGemportIDForPbit(ctx context.Context, tp *TechProfile, dir tp_pb.Direction, pbit uint32) uint32 {
+func (t *TechProfileMgr) GetGemportIDForPbit(tp *TechProfile, dir tp_pb.Direction, pbit uint32) uint32 {
 	/*
 	   Function to get the Gemport ID mapped to a pbit.
 	*/
@@ -882,7 +882,7 @@
 				// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
 				if p, err := strconv.Atoi(string(tp.UpstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
 					if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-						logger.Debugw(ctx, "Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[gemCnt].GemportID})
+						logger.Debugw("Found-US-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.UpstreamGemPortAttributeList[gemCnt].GemportID})
 						return tp.UpstreamGemPortAttributeList[gemCnt].GemportID
 					}
 				}
@@ -898,14 +898,14 @@
 				// "lenOfPbitMap - pbitMapIdx + 1" will give pbit-i th value from LSB position in the pbit map string
 				if p, err := strconv.Atoi(string(tp.DownstreamGemPortAttributeList[gemCnt].PbitMap[lenOfPbitMap-pbitMapIdx+1])); err == nil {
 					if uint32(pbitMapIdx-2) == pbit && p == 1 { // Check this p-bit is set
-						logger.Debugw(ctx, "Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[gemCnt].GemportID})
+						logger.Debugw("Found-DS-GEMport-for-Pcp", log.Fields{"pbit": pbit, "GEMport": tp.DownstreamGemPortAttributeList[gemCnt].GemportID})
 						return tp.DownstreamGemPortAttributeList[gemCnt].GemportID
 					}
 				}
 			}
 		}
 	}
-	logger.Errorw(ctx, "No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
+	logger.Errorw("No-GemportId-Found-For-Pcp", log.Fields{"pcpVlan": pbit})
 	return 0
 }
 
@@ -919,7 +919,7 @@
 		for kvPath, kvPair := range kvPairs {
 			if value, err := kvstore.ToByte(kvPair.Value); err == nil {
 				if err = json.Unmarshal(value, &tp); err != nil {
-					logger.Errorw(ctx, "error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
+					logger.Errorw("error-unmarshal-kv-pair", log.Fields{"kvPath": kvPath, "value": value})
 					continue
 				} else {
 					tpInstances = append(tpInstances, tp)
diff --git a/pkg/techprofile/tech_profile_if.go b/pkg/techprofile/tech_profile_if.go
index 977dcdc..e605d49 100644
--- a/pkg/techprofile/tech_profile_if.go
+++ b/pkg/techprofile/tech_profile_if.go
@@ -24,18 +24,18 @@
 )
 
 type TechProfileIf interface {
-	SetKVClient(ctx context.Context) *db.Backend
-	GetTechProfileInstanceKVPath(ctx context.Context, techProfiletblID uint32, uniPortName string) string
+	SetKVClient() *db.Backend
+	GetTechProfileInstanceKVPath(techProfiletblID uint32, uniPortName string) string
 	GetTPInstanceFromKVStore(ctx context.Context, techProfiletblID uint32, path string) (*TechProfile, error)
 	CreateTechProfInstance(ctx context.Context, techProfiletblID uint32, uniPortName string, intfId uint32) (*TechProfile, error)
 	DeleteTechProfileInstance(ctx context.Context, techProfiletblID uint32, uniPortName string) error
-	GetprotoBufParamValue(ctx context.Context, paramType string, paramKey string) int32
-	GetUsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
-	GetDsScheduler(ctx context.Context, tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
+	GetprotoBufParamValue(paramType string, paramKey string) int32
+	GetUsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
+	GetDsScheduler(tpInstance *TechProfile) (*tp_pb.SchedulerConfig, error)
 	GetTrafficScheduler(tpInstance *TechProfile, SchedCfg *tp_pb.SchedulerConfig,
 		ShapingCfg *tp_pb.TrafficShapingInfo) *tp_pb.TrafficScheduler
-	GetTrafficQueues(ctx context.Context, tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
-	GetMulticastTrafficQueues(ctx context.Context, tp *TechProfile) []*tp_pb.TrafficQueue
-	GetGemportIDForPbit(ctx context.Context, tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32
+	GetTrafficQueues(tp *TechProfile, Dir tp_pb.Direction) ([]*tp_pb.TrafficQueue, error)
+	GetMulticastTrafficQueues(tp *TechProfile) []*tp_pb.TrafficQueue
+	GetGemportIDForPbit(tp *TechProfile, Dir tp_pb.Direction, pbit uint32) uint32
 	FindAllTpInstances(ctx context.Context, techProfiletblID uint32, ponIntf uint32, onuID uint32) []TechProfile
 }